// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define SEC_QUEUE_NUM_V2		1024
#define SEC_PF_PCI_DEVICE_ID		0xa255
#define SEC_VF_PCI_DEVICE_ID		0xa256

#define SEC_XTS_MIV_ENABLE_REG		0x301384
#define SEC_XTS_MIV_ENABLE_MSK		0x7FFFFFFF
#define SEC_XTS_MIV_DISABLE_MSK		0xFFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0xfffff7fd
#define SEC_BD_ERR_CHK_EN2		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		64
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		24

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_ENGINE_PF_CFG_OFF		0x300000
#define SEC_ACC_COMMON_REG_OFF		0x1000
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM(err)		(((err) >> 16) & 0xFF)
#define SEC_ECC_ADDR(err)		((err) >> 0)
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x1ff

#define SEC_RAS_CE_REG			0x50
#define SEC_RAS_FE_REG			0x54
#define SEC_RAS_NFE_REG			0x58
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x177
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x0100
#define SEC_MEM_INIT_DONE_REG		0x0104
#define SEC_QM_ABNORMAL_INT_MASK	0x100004

#define SEC_CONTROL_REG			0x0200
#define SEC_TRNG_EN_SHIFT		8
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x0220
#define SEC_INTERFACE_USER_CTRL1_REG	0x0224
#define SEC_BD_ERR_CHK_EN_REG1		0x0384
#define SEC_BD_ERR_CHK_EN_REG2		0x038c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_VF_CNT_MASK			0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN		20

#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
			     SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static LIST_HEAD(sec_list);
static DEFINE_MUTEX(sec_list_lock);

static const struct sec_hw_error sec_hw_errors[] = {
	{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
	{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
	{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
	{.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
	{.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
	{.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
	{.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
	{.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
	{.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
	{ /* sentinel */ }
};
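/*
 * sec_find_device() - find the SEC device closest to the given NUMA node.
 *
 * Walk the global device list and pick the device with the smallest node
 * distance that still has at least ctx_q_num free queue pairs, so a ctx
 * created from it can always allocate its full set of queues.  Returns
 * NULL if no registered device qualifies.
 */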
struct sec_dev *sec_find_device(int node)
{
#define SEC_NUMA_MAX_DISTANCE	100
	int min_distance = SEC_NUMA_MAX_DISTANCE;
	int dev_node = 0, free_qp_num = 0;
	struct sec_dev *sec, *ret = NULL;
	struct hisi_qm *qm;
	struct device *dev;

	mutex_lock(&sec_list_lock);
	list_for_each_entry(sec, &sec_list, list) {
		qm = &sec->qm;
		dev = &qm->pdev->dev;
#ifdef CONFIG_NUMA
		dev_node = dev->numa_node;
		if (dev_node < 0)
			dev_node = 0;
#endif
		if (node_distance(dev_node, node) < min_distance) {
			free_qp_num = hisi_qm_get_free_qp_num(qm);
			if (free_qp_num >= sec->ctx_q_num) {
				ret = sec;
				min_distance = node_distance(dev_node, node);
			}
		}
	}
	mutex_unlock(&sec_list_lock);

	return ret;
}

static const char * const sec_dbg_file_name[] = {
	[SEC_CURRENT_QM] = "current_qm",
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE    ", 0x301010},
	{"SEC_SAA_EN                    ", 0x301270},
	{"SEC_BD_LATENCY_MIN            ", 0x301600},
	{"SEC_BD_LATENCY_MAX            ", 0x301608},
	{"SEC_BD_LATENCY_AVG            ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0            ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1            ", 0x301674},
	{"SEC_BD_NUM_IN_SEC             ", 0x301680},
	{"SEC_ECC_1BIT_CNT              ", 0x301C00},
	{"SEC_ECC_1BIT_INFO             ", 0x301C04},
	{"SEC_ECC_2BIT_CNT              ", 0x301C10},
	{"SEC_ECC_2BIT_INFO             ", 0x301C14},
	{"SEC_BD_SAA0                   ", 0x301C20},
	{"SEC_BD_SAA1                   ", 0x301C24},
	{"SEC_BD_SAA2                   ", 0x301C28},
	{"SEC_BD_SAA3                   ", 0x301C2C},
	{"SEC_BD_SAA4                   ", 0x301C30},
	{"SEC_BD_SAA5                   ", 0x301C34},
	{"SEC_BD_SAA6                   ", 0x301C38},
	{"SEC_BD_SAA7                   ", 0x301C3C},
	{"SEC_BD_SAA8                   ", 0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	struct pci_dev *pdev;
	u32 n, q_num;
	u8 rev_id;
	int ret;

	if (!val)
		return -EINVAL;

	pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
			      SEC_PF_PCI_DEVICE_ID, NULL);
	if (!pdev) {
		q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
		pr_info("No device found, assume queue number is %d!\n",
			q_num);
	} else {
		rev_id = pdev->revision;

		switch (rev_id) {
		case QM_HW_V1:
			q_num = SEC_QUEUE_NUM_V1;
			break;
		case QM_HW_V2:
			q_num = SEC_QUEUE_NUM_V2;
			break;
		default:
			return -EINVAL;
		}
	}

	ret = kstrtou32(val, 10, &n);
	if (ret || !n || n > q_num)
		return -EINVAL;

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 1-4096, v2 1-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};

static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Number of queues in ctx (2, 4, 6, ..., 1024)");
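/*
 * Both parameters above are 0444, i.e. read-only once the module is
 * loaded, so they must be given at load time, for example (illustrative
 * values only):
 *
 *	modprobe hisi_sec2 pf_q_num=256 ctx_q_num=32
 *
 * pf_q_num is validated against the queue limit of the detected hardware
 * revision; ctx_q_num must be a non-zero even number no larger than
 * QM_Q_DEPTH.
 */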
"Number of queue in ctx (2, 4, 6, ..., 1024)"); 239 240 static const struct pci_device_id sec_dev_ids[] = { 241 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) }, 242 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) }, 243 { 0, } 244 }; 245 MODULE_DEVICE_TABLE(pci, sec_dev_ids); 246 247 static inline void sec_add_to_list(struct sec_dev *sec) 248 { 249 mutex_lock(&sec_list_lock); 250 list_add_tail(&sec->list, &sec_list); 251 mutex_unlock(&sec_list_lock); 252 } 253 254 static inline void sec_remove_from_list(struct sec_dev *sec) 255 { 256 mutex_lock(&sec_list_lock); 257 list_del(&sec->list); 258 mutex_unlock(&sec_list_lock); 259 } 260 261 static u8 sec_get_endian(struct sec_dev *sec) 262 { 263 struct hisi_qm *qm = &sec->qm; 264 u32 reg; 265 266 /* 267 * As for VF, it is a wrong way to get endian setting by 268 * reading a register of the engine 269 */ 270 if (qm->pdev->is_virtfn) { 271 dev_err_ratelimited(&qm->pdev->dev, 272 "cannot access a register in VF!\n"); 273 return SEC_LE; 274 } 275 reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF + 276 SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG); 277 278 /* BD little endian mode */ 279 if (!(reg & BIT(0))) 280 return SEC_LE; 281 282 /* BD 32-bits big endian mode */ 283 else if (!(reg & BIT(1))) 284 return SEC_32BE; 285 286 /* BD 64-bits big endian mode */ 287 else 288 return SEC_64BE; 289 } 290 291 static int sec_engine_init(struct sec_dev *sec) 292 { 293 struct hisi_qm *qm = &sec->qm; 294 int ret; 295 u32 reg; 296 297 /* disable clock gate control */ 298 reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); 299 reg &= SEC_CLK_GATE_DISABLE; 300 writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); 301 302 writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG)); 303 304 ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG), 305 reg, reg & 0x1, SEC_DELAY_10_US, 306 SEC_POLL_TIMEOUT_US); 307 if (ret) { 308 dev_err(&qm->pdev->dev, "fail to init sec mem\n"); 309 return ret; 310 } 311 312 reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); 313 reg |= (0x1 << SEC_TRNG_EN_SHIFT); 314 writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); 315 316 reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); 317 reg |= SEC_USER0_SMMU_NORMAL; 318 writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); 319 320 reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); 321 reg |= SEC_USER1_SMMU_NORMAL; 322 writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); 323 324 writel_relaxed(SEC_BD_ERR_CHK_EN1, 325 SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); 326 writel_relaxed(SEC_BD_ERR_CHK_EN2, 327 SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2)); 328 329 /* enable clock gate control */ 330 reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); 331 reg |= SEC_CLK_GATE_ENABLE; 332 writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); 333 334 /* config endian */ 335 reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); 336 reg |= sec_get_endian(sec); 337 writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); 338 339 /* Enable sm4 xts mode multiple iv */ 340 writel_relaxed(SEC_XTS_MIV_ENABLE_MSK, 341 qm->io_base + SEC_XTS_MIV_ENABLE_REG); 342 343 return 0; 344 } 345 346 static int sec_set_user_domain_and_cache(struct sec_dev *sec) 347 { 348 struct hisi_qm *qm = &sec->qm; 349 350 /* qm user domain */ 351 writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); 352 writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); 353 writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); 354 writel(AWUSER_M_CFG_ENABLE, qm->io_base + 
static int sec_set_user_domain_and_cache(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;

	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc, cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(sec);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	/* clear current_qm */
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void sec_hw_error_enable(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	u32 val;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		dev_info(&qm->pdev->dev,
			 "V1 does not support hw error handling\n");
		return;
	}

	val = readl(qm->io_base + SEC_CONTROL_REG);

	/* clear SEC hw error source if there is any */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when an m-bit error occurs */
	val = val | SEC_AXI_SHUTDOWN_ENABLE;

	writel(val, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_disable(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);

	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when an m-bit error occurs */
	val = val & SEC_AXI_SHUTDOWN_DISABLE;

	writel(val, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_init(struct sec_dev *sec)
{
	if (sec->qm.fun_type == QM_HW_VF)
		return;

	hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE,
			      QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
			      | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
			      QM_DB_RANDOM_INVALID);
	sec_hw_error_enable(sec);
}

static void sec_hw_error_uninit(struct sec_dev *sec)
{
	if (sec->qm.fun_type == QM_HW_VF)
		return;

	sec_hw_error_disable(sec);
	writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK);
}

static u32 sec_current_qm_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
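/*
 * Queue layout assumed below and in sec_vf_q_assign(): the PF owns
 * qm->qp_num queues, and the remaining ctrl_qp_num - qp_num queues are
 * split evenly across the VFs, with the last VF also taking the
 * division remainder.
 */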
static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	u32 vfq_num;
	u32 tmp;

	if (val > sec->num_vfs)
		return -EINVAL;

	/* calculate curr_qm_qp_num from the PF or VF device id and store it */
	if (!val) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;

		if (val == sec->num_vfs)
			qm->debug.curr_qm_qp_num =
				qm->ctrl_qp_num - qm->qp_num -
				(sec->num_vfs - 1) * vfq_num;
		else
			qm->debug.curr_qm_qp_num = vfq_num;
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CURRENT_QM:
		val = sec_current_qm_read(file);
		break;
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}

	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CURRENT_QM:
		ret = sec_current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};
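/*
 * Debugfs layout under <debugfs>/hisi_sec2/<dev_name>/:
 *   current_qm, clear_enable   - PF-only control files (sec_debug_init)
 *   sec_dfx/regs               - read-only dump of sec_dfx_regs
 *   sec_dfx/send_cnt, recv_cnt - per-device request counters
 */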
static int sec_core_debug_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;

	tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;

	debugfs_create_regset32("regs", 0444, tmp_d, regset);

	debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);

	debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);

	return 0;
}

static int sec_debug_init(struct sec_dev *sec)
{
	int i;

	for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
		spin_lock_init(&sec->debug.files[i].lock);
		sec->debug.files[i].index = i;
		sec->debug.files[i].qm = &sec->qm;

		debugfs_create_file(sec_dbg_file_name[i], 0600,
				    sec->qm.debug.debug_root,
				    sec->debug.files + i,
				    &sec_dbg_fops);
	}

	return sec_core_debug_init(sec);
}

static int sec_debugfs_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	ret = hisi_qm_debug_init(qm);
	if (ret)
		goto failed_to_create;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
		ret = sec_debug_init(sec);
		if (ret)
			goto failed_to_create;
	}

	return 0;

failed_to_create:
	debugfs_remove_recursive(sec_debugfs_root);

	return ret;
}

static void sec_debugfs_exit(struct sec_dev *sec)
{
	debugfs_remove_recursive(sec->qm.debug.debug_root);
}

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	switch (qm->ver) {
	case QM_HW_V1:
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
		break;

	case QM_HW_V2:
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
		break;

	default:
		return -EINVAL;
	}

	ret = sec_set_user_domain_and_cache(sec);
	if (ret)
		return ret;

	sec_hw_error_init(sec);
	sec_debug_regs_clear(qm);

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	enum qm_hw_ver rev_id;

	rev_id = hisi_qm_get_hw_version(pdev);
	if (rev_id == QM_HW_UNKNOWN)
		return -ENODEV;

	qm->pdev = pdev;
	qm->ver = rev_id;

	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;
	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	qm->use_dma_api = true;

	return hisi_qm_init(qm);
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}
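/*
 * PF probe: take the default queue range and run the full engine init.
 * VF probe: the queue range comes from the PF, as a fixed split on v1
 * and a mailbox query on v2.
 */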
static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
{
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;

		return sec_pf_probe_init(sec);
	} else if (qm->fun_type == QM_HW_VF) {
		/*
		 * There is no way to get the qm configuration from a VM
		 * on v1 hardware, so the PF is forced to use
		 * SEC_PF_DEF_Q_NUM and only one VF is triggered on v1.
		 * v2 hardware has no such problem.
		 */
		if (qm->ver == QM_HW_V1) {
			qm->qp_base = SEC_PF_DEF_Q_NUM;
			qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
		} else if (qm->ver == QM_HW_V2) {
			/* v2 starts to support getting the vft by mailbox */
			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		}
	} else {
		return -ENODEV;
	}

	return 0;
}

static void sec_probe_uninit(struct sec_dev *sec)
{
	sec_hw_error_uninit(sec);
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	pci_set_drvdata(pdev, sec);

	sec->ctx_q_num = ctx_q_num;

	qm = &sec->qm;

	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to pre init qm!\n");
		return ret;
	}

	ret = sec_probe_init(qm, sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(sec);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	sec_add_to_list(sec);

	ret = sec_register_to_crypto();
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_remove_from_list;
	}

	return 0;

err_remove_from_list:
	sec_remove_from_list(sec);
	sec_debugfs_exit(sec);
	hisi_qm_stop(qm);

err_probe_uninit:
	sec_probe_uninit(sec);

err_qm_uninit:
	sec_qm_uninit(qm);

	return ret;
}

/* now we only support equal assignment */
static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
{
	struct hisi_qm *qm = &sec->qm;
	u32 qp_num = qm->qp_num;
	u32 q_base = qp_num;
	u32 q_num, remain_q_num;
	int i, j, ret;

	if (!num_vfs)
		return -EINVAL;

	remain_q_num = qm->ctrl_qp_num - qp_num;
	q_num = remain_q_num / num_vfs;

	for (i = 1; i <= num_vfs; i++) {
		if (i == num_vfs)
			q_num += remain_q_num % num_vfs;
		ret = hisi_qm_set_vft(qm, i, q_base, q_num);
		if (ret) {
			for (j = i; j > 0; j--)
				hisi_qm_set_vft(qm, j, 0, 0);
			return ret;
		}
		q_base += q_num;
	}

	return 0;
}

static int sec_clear_vft_config(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	u32 num_vfs = sec->num_vfs;
	int ret;
	u32 i;

	for (i = 1; i <= num_vfs; i++) {
		ret = hisi_qm_set_vft(qm, i, 0, 0);
		if (ret)
			return ret;
	}

	sec->num_vfs = 0;

	return 0;
}
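/*
 * Called via the sriov_configure hook: queue pairs are assigned to the
 * requested VFs before SR-IOV is enabled, so a VF never comes up
 * without queues; on failure the VFT configuration is rolled back.
 */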
static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);
	int pre_existing_vfs, ret;
	u32 num_vfs;

	pre_existing_vfs = pci_num_vf(pdev);

	if (pre_existing_vfs) {
		pci_err(pdev,
			"Can't enable VF. Please disable the existing VFs first!\n");
		return 0;
	}

	num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);

	ret = sec_vf_q_assign(sec, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't assign queues for VF!\n");
		return ret;
	}

	sec->num_vfs = num_vfs;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't enable VF!\n");
		sec_clear_vft_config(sec);
		return ret;
	}

	return num_vfs;
}

static int sec_sriov_disable(struct pci_dev *pdev)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
		return -EPERM;
	}

	/* remove() of sec_pci_driver will be called to free VF resources */
	pci_disable_sriov(pdev);

	return sec_clear_vft_config(sec);
}

static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs)
		return sec_sriov_enable(pdev, num_vfs);
	else
		return sec_sriov_disable(pdev);
}

static void sec_remove(struct pci_dev *pdev)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &sec->qm;

	sec_unregister_from_crypto();

	sec_remove_from_list(sec);

	if (qm->fun_type == QM_HW_PF && sec->num_vfs)
		(void)sec_sriov_disable(pdev);

	sec_debugfs_exit(sec);

	(void)hisi_qm_stop(qm);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);

	sec_probe_uninit(sec);

	sec_qm_uninit(qm);
}

static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &sec->qm.pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
				err_val = readl(sec->qm.io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					SEC_ECC_NUM(err_val));
				dev_err(dev, "multi ecc sram addr=0x%x\n",
					SEC_ECC_ADDR(err_val));
			}
		}
		errs++;
	}
}

static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec)
{
	u32 err_sts;

	/* read err sts */
	err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS);
	if (err_sts) {
		sec_log_hw_error(sec, err_sts);

		/* clear error interrupts */
		writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE);

		return PCI_ERS_RESULT_NEED_RESET;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
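/*
 * Combine the QM and SEC engine error results: request a reset if
 * either side reports an unrecoverable error.
 */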
static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);
	pci_ers_result_t qm_ret, sec_ret;

	if (!sec) {
		pci_err(pdev, "Can't recover error during device init\n");
		return PCI_ERS_RESULT_NONE;
	}

	/* log qm error */
	qm_ret = hisi_qm_hw_error_handle(&sec->qm);

	/* log sec error */
	sec_ret = sec_hw_error_handle(sec);

	return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
		sec_ret == PCI_ERS_RESULT_NEED_RESET) ?
		PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
}

static pci_ers_result_t sec_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_NONE;

	pci_info(pdev, "PCI error detected, state = %d!\n", state);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	return sec_process_hw_error(pdev);
}

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = sec_error_detected,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = sec_sriov_configure,
};

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");