// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define SEC_QUEUE_NUM_V2		1024
#define SEC_PF_PCI_DEVICE_ID		0xa255
#define SEC_VF_PCI_DEVICE_ID		0xa256

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_ENGINE_PF_CFG_OFF		0x300000
#define SEC_ACC_COMMON_REG_OFF		0x1000
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM(err)		(((err) >> 16) & 0xFF)
#define SEC_ECC_ADDR(err)		((err) >> 0)
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x1ff
#define SEC_CORE_INT_CLEAR		0x1ff
#define SEC_SAA_ENABLE			0x17f

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x177
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x0100
#define SEC_MEM_INIT_DONE_REG		0x0104

#define SEC_CONTROL_REG			0x0200
#define SEC_TRNG_EN_SHIFT		8
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x0220
#define SEC_INTERFACE_USER_CTRL1_REG	0x0224
#define SEC_SAA_EN_REG			0x0270
#define SEC_BD_ERR_CHK_EN_REG0		0x0380
#define SEC_BD_ERR_CHK_EN_REG1		0x0384
#define SEC_BD_ERR_CHK_EN_REG3		0x038c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					 SEC_USER1_ENABLE_DATA_SSV | \
					 SEC_USER1_WB_CONTEXT_SSV | \
					 SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		64
#define SEC_SQE_MASK_LEN		48

#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
			     SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
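
/*
 * Global list of probed SEC devices: sec_create_qps() below allocates queue
 * pairs from it, and the two callbacks hook SEC algorithm registration and
 * unregistration into the kernel crypto API.
 */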
static struct hisi_qm_list sec_devices = {
	.register_to_crypto = sec_register_to_crypto,
	.unregister_from_crypto = sec_unregister_from_crypto,
};

static const struct sec_hw_error sec_hw_errors[] = {
	{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
	{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
	{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
	{.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
	{.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
	{.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
	{.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
	{.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
	{.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
	{ /* sentinel */ }
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CURRENT_QM] = "current_qm",
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");
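
/*
 * SR-IOV control: vfs_num VFs are enabled at probe time via
 * hisi_qm_sriov_enable(); 0 (the default) leaves SR-IOV disabled.
 */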
static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means sec registers to crypto only,
 * uacce_mode = 1 means sec registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static u8 sec_get_endian(struct hisi_qm *qm)
{
	u32 reg;

	/*
	 * A VF cannot get the endian setting by reading an engine
	 * register; only the PF has access, so default to little endian.
	 */
	if (qm->pdev->is_virtfn) {
		dev_err_ratelimited(&qm->pdev->dev,
				    "cannot access a register in VF!\n");
		return SEC_LE;
	}
	reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
			    SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);

	/* BD little endian mode */
	if (!(reg & BIT(0)))
		return SEC_LE;

	/* BD 32-bit big endian mode */
	else if (!(reg & BIT(1)))
		return SEC_32BE;

	/* BD 64-bit big endian mode */
	else
		return SEC_64BE;
}

static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control */
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));

	ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "fail to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
	reg |= SEC_USER0_SMMU_NORMAL;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));

	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
	reg &= SEC_USER1_SMMU_MASK;
	if (qm->use_sva && qm->ver == QM_HW_V2)
		reg |= SEC_USER1_SMMU_SVA;
	else
		reg |= SEC_USER1_SMMU_NORMAL;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));

	/* Enable SM4 extra modes such as CTR/ECB */
	writel_relaxed(SEC_BD_ERR_CHK_EN0,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
	/* Enable SM4 XTS mode with multiple IVs */
	writel_relaxed(SEC_BD_ERR_CHK_EN1,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
	writel_relaxed(SEC_BD_ERR_CHK_EN3,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));

	/* configure endianness */
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= sec_get_endian(qm);
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	return 0;
}
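
/*
 * Configure the QM AXI user domain and cache behaviour, then bring up the
 * SEC engine itself. Besides being called at PF probe time, this is wired
 * up as the ->hw_init hook of sec_err_ini below.
 */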
static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable SQC, CQC writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;

	/* clear current_qm */
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

	/* clear sec dfx regs */
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 not support hw error handle\n");
		return;
	}

	val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));

	/* clear any pending SEC hw error sources */
	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when an m-bit error occurs */
	val = val | SEC_AXI_SHUTDOWN_ENABLE;

	writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);

	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when an m-bit error occurs */
	val = val & SEC_AXI_SHUTDOWN_DISABLE;

	writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
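
/*
 * Accessors backing the two debugfs control files: current_qm selects which
 * function's queues the QM DFX counters report on, and clear_enable toggles
 * the read-clear bit for the SEC counter registers.
 */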
static u32 sec_current_qm_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 vfq_num;
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

	/* Calculate curr_qm_qp_num from the PF or VF device ID and store it */
	if (!val) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;

		if (val == qm->vfs_num)
			qm->debug.curr_qm_qp_num =
				qm->ctrl_qp_num - qm->qp_num -
				(qm->vfs_num - 1) * vfq_num;
		else
			qm->debug.curr_qm_qp_num = vfq_num;
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CURRENT_QM:
		val = sec_current_qm_read(file);
		break;
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}

	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CURRENT_QM:
		ret = sec_current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}
static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
		debugfs_create_regset32("regs", 0444, tmp_d, regset);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
				   sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
		for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

	return 0;

failed_to_create:
	debugfs_remove_recursive(sec_debugfs_root);
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					SEC_ECC_NUM(err_val));
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}
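
/*
 * Toggle the AXI master shutdown bit (clear, then set) so the engine
 * resumes out-of-order transactions once an ECC error has been handled.
 */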
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init = sec_set_user_domain_and_cache,
	.hw_err_enable = sec_hw_error_enable,
	.hw_err_disable = sec_hw_error_disable,
	.get_dev_hw_err_status = sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err = sec_log_hw_error,
	.open_axi_master_ooo = sec_open_axi_master_ooo,
	.err_info = {
		.ce = QM_BASE_CE,
		.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
		       QM_ACC_WB_NOT_READY_TIMEOUT,
		.fe = 0,
		.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
		.dev_ce_mask = SEC_RAS_CE_ENB_MSK,
		.msi_wr_port = BIT(0),
		.acpi_rst = "SRST",
	}
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->ver == QM_HW_V1)
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
	else
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;

	qm->err_ini = &sec_err_ini;

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "cipher\ndigest\naead\n";
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * There is no way to get the qm configuration from inside
		 * a VM on v1 hardware, so the PF is forced to use
		 * SEC_PF_DEF_Q_NUM and only one VF is brought up there.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	/*
	 * WQ_HIGHPRI: SEC requests must be handled with low latency,
	 * so a high-priority workqueue is needed.
	 * WQ_UNBOUND: SEC tasks tend to be long-running, CPU-intensive
	 * workloads.
	 */
	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);

	return ret;
}
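
/* Thin wrapper so the probe error path and sec_remove() stay symmetric. */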
static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);

	destroy_workqueue(qm->wq);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	ret = hisi_qm_alg_register(qm, &sec_devices);
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_qm_stop;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	return 0;

err_alg_unregister:
	hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_wait_task_finish(qm, &sec_devices);
	hisi_qm_alg_unregister(qm, &sec_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};
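
/*
 * PCI driver glue: error handling, reset and SR-IOV configuration are all
 * delegated to the shared hisi_qm helpers.
 */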
static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
	.shutdown = hisi_qm_dev_shutdown,
};

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");