// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define SEC_QUEUE_NUM_V2		1024
#define SEC_PF_PCI_DEVICE_ID		0xa255
#define SEC_VF_PCI_DEVICE_ID		0xa256

#define SEC_XTS_MIV_ENABLE_REG		0x301384
#define SEC_XTS_MIV_ENABLE_MSK		0x7FFFFFFF
#define SEC_XTS_MIV_DISABLE_MSK		0xFFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0xfffff7fd
#define SEC_BD_ERR_CHK_EN2		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		64
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		24
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_ENGINE_PF_CFG_OFF		0x300000
#define SEC_ACC_COMMON_REG_OFF		0x1000
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM(err)		(((err) >> 16) & 0xFF)
#define SEC_ECC_ADDR(err)		((err) >> 0)
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x1ff

#define SEC_RAS_CE_REG			0x50
#define SEC_RAS_FE_REG			0x54
#define SEC_RAS_NFE_REG			0x58
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x177
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x0100
#define SEC_MEM_INIT_DONE_REG		0x0104
#define SEC_QM_ABNORMAL_INT_MASK	0x100004

#define SEC_CONTROL_REG			0x0200
#define SEC_TRNG_EN_SHIFT		8
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x0220
#define SEC_INTERFACE_USER_CTRL1_REG	0x0224
#define SEC_BD_ERR_CHK_EN_REG1		0x0384
#define SEC_BD_ERR_CHK_EN_REG2		0x038c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_VF_CNT_MASK			0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN		20

#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
			     SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static LIST_HEAD(sec_list);
static DEFINE_MUTEX(sec_list_lock);

static const struct sec_hw_error sec_hw_errors[] = {
	{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
	{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
	{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
	{.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
	{.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
	{.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
	{.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
	{.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
	{.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
	{ /* sentinel */ }
};

struct sec_dev *sec_find_device(int node)
{
#define SEC_NUMA_MAX_DISTANCE	100
	int min_distance = SEC_NUMA_MAX_DISTANCE;
	int dev_node = 0, free_qp_num = 0;
	struct sec_dev *sec, *ret = NULL;
	struct hisi_qm *qm;
	struct device *dev;

	mutex_lock(&sec_list_lock);
	list_for_each_entry(sec, &sec_list, list) {
		qm = &sec->qm;
		dev = &qm->pdev->dev;
#ifdef CONFIG_NUMA
		dev_node = dev->numa_node;
		if (dev_node < 0)
			dev_node = 0;
#endif
		if (node_distance(dev_node, node) < min_distance) {
			free_qp_num = hisi_qm_get_free_qp_num(qm);
			if (free_qp_num >= sec->ctx_q_num) {
				ret = sec;
				min_distance = node_distance(dev_node, node);
			}
		}
	}
	mutex_unlock(&sec_list_lock);

	return ret;
}

static const char * const sec_dbg_file_name[] = {
	[SEC_CURRENT_QM] = "current_qm",
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	struct pci_dev *pdev;
	u32 n, q_num;
	u8 rev_id;
	int ret;

	if (!val)
		return -EINVAL;

	pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
			      SEC_PF_PCI_DEVICE_ID, NULL);
	if (!pdev) {
		q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
		pr_info("No device, suppose queue number is %d!\n", q_num);
	} else {
		rev_id = pdev->revision;

		switch (rev_id) {
		case QM_HW_V1:
			q_num = SEC_QUEUE_NUM_V1;
			break;
		case QM_HW_V2:
			q_num = SEC_QUEUE_NUM_V2;
			break;
		default:
			return -EINVAL;
		}
	}

	ret = kstrtou32(val, 10, &n);
	if (ret || !n || n > q_num)
		return -EINVAL;

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static inline void sec_add_to_list(struct sec_dev *sec)
{
	mutex_lock(&sec_list_lock);
	list_add_tail(&sec->list, &sec_list);
	mutex_unlock(&sec_list_lock);
}

static inline void sec_remove_from_list(struct sec_dev *sec)
{
	mutex_lock(&sec_list_lock);
	list_del(&sec->list);
	mutex_unlock(&sec_list_lock);
}

static u8 sec_get_endian(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	u32 reg;

	/*
	 * For a VF, the endian setting cannot be obtained by
	 * reading a register of the engine.
	 */
	if (qm->pdev->is_virtfn) {
		dev_err_ratelimited(&qm->pdev->dev,
				    "cannot access a register in VF!\n");
		return SEC_LE;
	}
	reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
			    SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);

	/* BD little endian mode */
	if (!(reg & BIT(0)))
		return SEC_LE;

	/* BD 32-bits big endian mode */
	else if (!(reg & BIT(1)))
		return SEC_32BE;

	/* BD 64-bits big endian mode */
	else
		return SEC_64BE;
}

static int sec_engine_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;
	u32 reg;

	/* disable clock gate control */
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));

	ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(&qm->pdev->dev, "fail to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
	reg |= SEC_USER0_SMMU_NORMAL;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));

	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
	reg |= SEC_USER1_SMMU_NORMAL;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));

	writel_relaxed(SEC_BD_ERR_CHK_EN1,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
	writel_relaxed(SEC_BD_ERR_CHK_EN2,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2));

	/* enable clock gate control */
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	/* config endian */
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= sec_get_endian(sec);
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	/* Enable sm4 xts mode multiple iv */
	writel_relaxed(SEC_XTS_MIV_ENABLE_MSK,
		       qm->io_base + SEC_XTS_MIV_ENABLE_REG);

	return 0;
}

static int sec_set_user_domain_and_cache(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;

	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(sec);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	/* clear current_qm */
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		dev_info(&qm->pdev->dev, "V1 not support hw error handle\n");
		return;
	}

	val = readl(qm->io_base + SEC_CONTROL_REG);

	/* clear any pending SEC hw error source */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when a multi-bit error occurs */
	val = val | SEC_AXI_SHUTDOWN_ENABLE;

	writel(val, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);

	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when a multi-bit error occurs */
	val = val & SEC_AXI_SHUTDOWN_DISABLE;

	writel(val, qm->io_base + SEC_CONTROL_REG);
}

static u32 sec_current_qm_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	u32 vfq_num;
	u32 tmp;

	if (val > sec->num_vfs)
		return -EINVAL;

	/* Calculate curr_qm_qp_num from the PF or VF device ID and store it */
	if (!val) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;

		if (val == sec->num_vfs)
			qm->debug.curr_qm_qp_num =
				qm->ctrl_qp_num - qm->qp_num -
				(sec->num_vfs - 1) * vfq_num;
		else
			qm->debug.curr_qm_qp_num = vfq_num;
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CURRENT_QM:
		val = sec_current_qm_read(file);
		break;
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}

	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CURRENT_QM:
		ret = sec_current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 NULL, "%lld\n");

static int sec_core_debug_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;

	tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOENT;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;

	debugfs_create_regset32("regs", 0444, tmp_d, regset);

	debugfs_create_file("send_cnt", 0444, tmp_d,
			    &dfx->send_cnt, &sec_atomic64_ops);

	debugfs_create_file("recv_cnt", 0444, tmp_d,
			    &dfx->recv_cnt, &sec_atomic64_ops);

	return 0;
}

static int sec_debug_init(struct sec_dev *sec)
{
	int i;

	for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
		spin_lock_init(&sec->debug.files[i].lock);
		sec->debug.files[i].index = i;
		sec->debug.files[i].qm = &sec->qm;

		debugfs_create_file(sec_dbg_file_name[i], 0600,
				    sec->qm.debug.debug_root,
				    sec->debug.files + i,
				    &sec_dbg_fops);
	}

	return sec_core_debug_init(sec);
}

static int sec_debugfs_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	ret = hisi_qm_debug_init(qm);
	if (ret)
		goto failed_to_create;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
		ret = sec_debug_init(sec);
		if (ret)
			goto failed_to_create;
	}

	return 0;

failed_to_create:
	debugfs_remove_recursive(sec_debugfs_root);

	return ret;
}

static void sec_debugfs_exit(struct sec_dev *sec)
{
	debugfs_remove_recursive(sec->qm.debug.debug_root);
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					SEC_ECC_NUM(err_val));
				dev_err(dev, "multi ecc sram addr=0x%x\n",
					SEC_ECC_ADDR(err_val));
			}
		}
		errs++;
	}

	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_err_enable		= sec_hw_error_enable,
	.hw_err_disable		= sec_hw_error_disable,
	.get_dev_hw_err_status	= sec_get_hw_err_status,
	.log_dev_hw_err		= sec_log_hw_error,
	.err_info		= {
		.ce		= QM_BASE_CE,
		.nfe		= QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
				  QM_ACC_WB_NOT_READY_TIMEOUT,
		.fe		= 0,
		.msi		= QM_DB_RANDOM_INVALID,
	}
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	switch (qm->ver) {
	case QM_HW_V1:
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
		break;

	case QM_HW_V2:
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
		break;

	default:
		return -EINVAL;
	}

	qm->err_ini = &sec_err_ini;

	ret = sec_set_user_domain_and_cache(sec);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	enum qm_hw_ver rev_id;

	rev_id = hisi_qm_get_hw_version(pdev);
	if (rev_id == QM_HW_UNKNOWN)
		return -ENODEV;

	qm->pdev = pdev;
	qm->ver = rev_id;

	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;
	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	qm->use_dma_api = true;

	return hisi_qm_init(qm);
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
{
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;

		return sec_pf_probe_init(sec);
	} else if (qm->fun_type == QM_HW_VF) {
		/*
		 * v1 hardware provides no way for a VF to get the qm
		 * configuration, so force the PF to use SEC_PF_DEF_Q_NUM
		 * and allow only one VF on v1 hardware.
		 * v2 hardware has no such problem.
		 */
		if (qm->ver == QM_HW_V1) {
			qm->qp_base = SEC_PF_DEF_Q_NUM;
			qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
		} else if (qm->ver == QM_HW_V2) {
			/* v2 starts to support get vft by mailbox */
			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		}
	} else {
		return -ENODEV;
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	pci_set_drvdata(pdev, sec);

	sec->ctx_q_num = ctx_q_num;

	qm = &sec->qm;

	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to pre init qm!\n");
		return ret;
	}

	ret = sec_probe_init(qm, sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(sec);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	sec_add_to_list(sec);

	ret = sec_register_to_crypto();
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_remove_from_list;
	}

	return 0;

err_remove_from_list:
	sec_remove_from_list(sec);
	sec_debugfs_exit(sec);
	hisi_qm_stop(qm);

err_probe_uninit:
	sec_probe_uninit(qm);

err_qm_uninit:
	sec_qm_uninit(qm);

	return ret;
}

/* now we only support equal assignment */
static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
{
	struct hisi_qm *qm = &sec->qm;
	u32 qp_num = qm->qp_num;
	u32 q_base = qp_num;
	u32 q_num, remain_q_num;
	int i, j, ret;

	if (!num_vfs)
		return -EINVAL;

	remain_q_num = qm->ctrl_qp_num - qp_num;
	q_num = remain_q_num / num_vfs;

	for (i = 1; i <= num_vfs; i++) {
		if (i == num_vfs)
			q_num += remain_q_num % num_vfs;
		ret = hisi_qm_set_vft(qm, i, q_base, q_num);
		if (ret) {
			for (j = i; j > 0; j--)
				hisi_qm_set_vft(qm, j, 0, 0);
			return ret;
		}
		q_base += q_num;
	}

	return 0;
}

static int sec_clear_vft_config(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	u32 num_vfs = sec->num_vfs;
	int ret;
	u32 i;

	for (i = 1; i <= num_vfs; i++) {
		ret = hisi_qm_set_vft(qm, i, 0, 0);
		if (ret)
			return ret;
	}

	sec->num_vfs = 0;

	return 0;
}

static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);
	int pre_existing_vfs, ret;
	u32 num_vfs;

	pre_existing_vfs = pci_num_vf(pdev);

	if (pre_existing_vfs) {
		pci_err(pdev, "Can't enable VF. Please disable at first!\n");
		return 0;
	}

	num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);

	ret = sec_vf_q_assign(sec, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't assign queues for VF!\n");
		return ret;
	}

	sec->num_vfs = num_vfs;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't enable VF!\n");
		sec_clear_vft_config(sec);
		return ret;
	}

	return num_vfs;
}

static int sec_sriov_disable(struct pci_dev *pdev)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
		return -EPERM;
	}

	/* the remove callback of sec_pci_driver will be called to free VF resources */
	pci_disable_sriov(pdev);

	return sec_clear_vft_config(sec);
}

static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs)
		return sec_sriov_enable(pdev, num_vfs);
	else
		return sec_sriov_disable(pdev);
}

static void sec_remove(struct pci_dev *pdev)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &sec->qm;

	sec_unregister_from_crypto();

	sec_remove_from_list(sec);

	if (qm->fun_type == QM_HW_PF && sec->num_vfs)
		(void)sec_sriov_disable(pdev);

	sec_debugfs_exit(sec);

	(void)hisi_qm_stop(qm);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = sec_sriov_configure,
};

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
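
/*
 * Illustrative usage sketch (not part of the driver itself): assuming the
 * module is built as hisi_sec2.ko, the load-time parameters defined above
 * can be set when loading it, for example:
 *
 *   modprobe hisi_sec2 pf_q_num=256 ctx_q_num=32
 *
 * sec_pf_q_num_set() bounds pf_q_num by the hardware queue count (4096 on
 * v1, 1024 on v2), and sec_ctx_q_num_set() requires ctx_q_num to be a
 * non-zero even value no greater than SEC_CTX_Q_NUM_MAX (32). When debugfs
 * is enabled, the per-device debug files created by sec_debugfs_init()
 * appear under <debugfs>/hisi_sec2/<pci-device-name>/.
 */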