// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include "zip.h"

#define PCI_DEVICE_ID_ZIP_PF		0xa250
#define PCI_DEVICE_ID_ZIP_VF		0xa251

#define HZIP_VF_NUM			63
#define HZIP_QUEUE_NUM_V1		4096
#define HZIP_QUEUE_NUM_V2		1024

#define HZIP_CLOCK_GATE_CTRL		0x301004
#define COMP0_ENABLE			BIT(0)
#define COMP1_ENABLE			BIT(1)
#define DECOMP0_ENABLE			BIT(2)
#define DECOMP1_ENABLE			BIT(3)
#define DECOMP2_ENABLE			BIT(4)
#define DECOMP3_ENABLE			BIT(5)
#define DECOMP4_ENABLE			BIT(6)
#define DECOMP5_ENABLE			BIT(7)
#define ALL_COMP_DECOMP_EN		(COMP0_ENABLE | COMP1_ENABLE | \
					 DECOMP0_ENABLE | DECOMP1_ENABLE | \
					 DECOMP2_ENABLE | DECOMP3_ENABLE | \
					 DECOMP4_ENABLE | DECOMP5_ENABLE)
#define DECOMP_CHECK_ENABLE		BIT(16)
#define HZIP_FSM_MAX_CNT		0x301008

#define HZIP_PORT_ARCA_CHE_0		0x301040
#define HZIP_PORT_ARCA_CHE_1		0x301044
#define HZIP_PORT_AWCA_CHE_0		0x301060
#define HZIP_PORT_AWCA_CHE_1		0x301064
#define CACHE_ALL_EN			0xffffffff

#define HZIP_BD_RUSER_32_63		0x301110
#define HZIP_SGL_RUSER_32_63		0x30111c
#define HZIP_DATA_RUSER_32_63		0x301128
#define HZIP_DATA_WUSER_32_63		0x301134
#define HZIP_BD_WUSER_32_63		0x301140

#define HZIP_QM_IDEL_STATUS		0x3040e4

#define HZIP_CORE_DEBUG_COMP_0		0x302000
#define HZIP_CORE_DEBUG_COMP_1		0x303000
#define HZIP_CORE_DEBUG_DECOMP_0	0x304000
#define HZIP_CORE_DEBUG_DECOMP_1	0x305000
#define HZIP_CORE_DEBUG_DECOMP_2	0x306000
#define HZIP_CORE_DEBUG_DECOMP_3	0x307000
#define HZIP_CORE_DEBUG_DECOMP_4	0x308000
#define HZIP_CORE_DEBUG_DECOMP_5	0x309000

#define HZIP_CORE_INT_SOURCE		0x3010A0
#define HZIP_CORE_INT_MASK_REG		0x3010A4
#define HZIP_CORE_INT_STATUS		0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC	BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO	0x301148
#define HZIP_CORE_INT_RAS_CE_ENB	0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB	0x301164
#define HZIP_CORE_INT_RAS_FE_ENB	0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE	0x7FE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT	16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT	24
#define HZIP_CORE_INT_MASK_ALL		GENMASK(10, 0)
#define HZIP_COMP_CORE_NUM		2
#define HZIP_DECOMP_CORE_NUM		6
#define HZIP_CORE_NUM			(HZIP_COMP_CORE_NUM + \
					 HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE			128
#define HZIP_SQ_SIZE			(HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM		64
#define HZIP_PF_DEF_Q_BASE		0

#define HZIP_SOFT_CTRL_CNT_CLR_CE	0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT	BIT(0)

#define HZIP_BUF_SIZE			22

static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
static LIST_HEAD(hisi_zip_list);
static DEFINE_MUTEX(hisi_zip_list_lock);

struct hisi_zip_resource {
	struct hisi_zip *hzip;
	int distance;
	struct list_head list;
};

static void free_list(struct list_head *head)
{
	struct hisi_zip_resource *res, *tmp;

	list_for_each_entry_safe(res, tmp, head, list) {
		list_del(&res->list);
		kfree(res);
	}
}

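/*
 * Pick the ZIP device closest to the caller's NUMA node: build a temporary
 * list of all registered devices sorted by node distance, then return the
 * nearest one that still has free queue pairs. Without CONFIG_NUMA, the
 * first registered device is used.
 */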
struct hisi_zip *find_zip_device(int node)
{
	struct hisi_zip_resource *res, *tmp;
	struct hisi_zip *ret = NULL;
	struct hisi_zip *hisi_zip;
	struct list_head *n;
	struct device *dev;
	LIST_HEAD(head);

	mutex_lock(&hisi_zip_list_lock);

	if (IS_ENABLED(CONFIG_NUMA)) {
		list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
			res = kzalloc(sizeof(*res), GFP_KERNEL);
			if (!res)
				goto err;

			dev = &hisi_zip->qm.pdev->dev;
			res->hzip = hisi_zip;
			res->distance = node_distance(dev_to_node(dev), node);

			n = &head;
			list_for_each_entry(tmp, &head, list) {
				if (res->distance < tmp->distance) {
					n = &tmp->list;
					break;
				}
			}
			list_add_tail(&res->list, n);
		}

		list_for_each_entry(tmp, &head, list) {
			if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
				ret = tmp->hzip;
				break;
			}
		}

		free_list(&head);
	} else {
		ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
	}

	mutex_unlock(&hisi_zip_list_lock);

	return ret;

err:
	free_list(&head);
	mutex_unlock(&hisi_zip_list_lock);
	return NULL;
}

struct hisi_zip_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_zip_hw_error zip_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "zip_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
	{ .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
	{ .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
	{ .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
	{ .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
	{ .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
	{ .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
	{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
	{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
	{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
	{ /* sentinel */ }
};

enum ctrl_debug_file_index {
	HZIP_CURRENT_QM,
	HZIP_CLEAR_ENABLE,
	HZIP_DEBUG_FILE_NUM,
};

static const char * const ctrl_debug_file_name[] = {
	[HZIP_CURRENT_QM] = "current_qm",
	[HZIP_CLEAR_ENABLE] = "clear_enable",
};

struct ctrl_debug_file {
	enum ctrl_debug_file_index index;
	spinlock_t lock;
	struct hisi_zip_ctrl *ctrl;
};

/*
 * One ZIP controller has one PF and multiple VFs. Global configuration
 * owned by the PF is kept in this structure.
 *
 * Just relevant for PF.
 */
struct hisi_zip_ctrl {
	u32 num_vfs;
	struct hisi_zip *hisi_zip;
	struct dentry *debug_root;
	struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};

enum {
	HZIP_COMP_CORE0,
	HZIP_COMP_CORE1,
	HZIP_DECOMP_CORE0,
	HZIP_DECOMP_CORE1,
	HZIP_DECOMP_CORE2,
	HZIP_DECOMP_CORE3,
	HZIP_DECOMP_CORE4,
	HZIP_DECOMP_CORE5,
};

static const u64 core_offsets[] = {
	[HZIP_COMP_CORE0] = 0x302000,
	[HZIP_COMP_CORE1] = 0x303000,
	[HZIP_DECOMP_CORE0] = 0x304000,
	[HZIP_DECOMP_CORE1] = 0x305000,
	[HZIP_DECOMP_CORE2] = 0x306000,
	[HZIP_DECOMP_CORE3] = 0x307000,
	[HZIP_DECOMP_CORE4] = 0x308000,
	[HZIP_DECOMP_CORE5] = 0x309000,
};

static struct debugfs_reg32 hzip_dfx_regs[] = {
	{"HZIP_GET_BD_NUM                ",	0x00ull},
	{"HZIP_GET_RIGHT_BD              ",	0x04ull},
	{"HZIP_GET_ERROR_BD              ",	0x08ull},
	{"HZIP_DONE_BD_NUM               ",	0x0cull},
	{"HZIP_WORK_CYCLE                ",	0x10ull},
	{"HZIP_IDLE_CYCLE                ",	0x18ull},
	{"HZIP_MAX_DELAY                 ",	0x20ull},
	{"HZIP_MIN_DELAY                 ",	0x24ull},
	{"HZIP_AVG_DELAY                 ",	0x28ull},
	{"HZIP_MEM_VISIBLE_DATA          ",	0x30ull},
	{"HZIP_MEM_VISIBLE_ADDR          ",	0x34ull},
	{"HZIP_COMSUMED_BYTE             ",	0x38ull},
	{"HZIP_PRODUCED_BYTE             ",	0x40ull},
	{"HZIP_COMP_INF                  ",	0x70ull},
	{"HZIP_PRE_OUT                   ",	0x78ull},
	{"HZIP_BD_RD                     ",	0x7cull},
	{"HZIP_BD_WR                     ",	0x80ull},
	{"HZIP_GET_BD_AXI_ERR_NUM        ",	0x84ull},
	{"HZIP_GET_BD_PARSE_ERR_NUM      ",	0x88ull},
	{"HZIP_ADD_BD_AXI_ERR_NUM        ",	0x8cull},
	{"HZIP_DECOMP_STF_RELOAD_CURR_ST ",	0x94ull},
	{"HZIP_DECOMP_LZ77_CURR_ST       ",	0x9cull},
};

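/*
 * Validate the pf_q_num module parameter against the queue-number limit of
 * the hardware revision actually present (v1: up to 4096, v2: up to 1024).
 * If no device has been probed yet, the stricter of the two limits is
 * assumed. Example load-time usage (hypothetical values):
 *
 *	modprobe hisi_zip pf_q_num=256 vfs_num=3
 */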
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
					      PCI_DEVICE_ID_ZIP_PF, NULL);
	u32 n, q_num;
	u8 rev_id;
	int ret;

	if (!val)
		return -EINVAL;

	if (!pdev) {
		q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
		pr_info("No device found currently, assuming queue number is %d\n",
			q_num);
	} else {
		rev_id = pdev->revision;
		switch (rev_id) {
		case QM_HW_V1:
			q_num = HZIP_QUEUE_NUM_V1;
			break;
		case QM_HW_V2:
			q_num = HZIP_QUEUE_NUM_V2;
			break;
		default:
			return -EINVAL;
		}
	}

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || n > q_num || n == 0)
		return -EINVAL;

	return param_set_int(val, kp);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 1-4096, v2 1-1024)");

static int uacce_mode;
module_param(uacce_mode, int, 0);

static u32 vfs_num;
module_param(vfs_num, uint, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63)");

static const struct pci_device_id hisi_zip_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);

static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
{
	mutex_lock(&hisi_zip_list_lock);
	list_add_tail(&hisi_zip->list, &hisi_zip_list);
	mutex_unlock(&hisi_zip_list_lock);
}

static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
{
	mutex_lock(&hisi_zip_list_lock);
	list_del(&hisi_zip->list);
	mutex_unlock(&hisi_zip_list_lock);
}

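/*
 * One-time PF hardware setup: configure the QM user domain and cache
 * attributes, enable all compression/decompression cores (together with the
 * decompression check), and enable SQC/CQC cache write-back.
 */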
static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
{
	void __iomem *base = hisi_zip->qm.io_base;

	/* qm user domain */
	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);

	/* cache */
	writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
	writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
	writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
	writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);

	/* user domain configurations */
	writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);

	/* enable all compression/decompression cores */
	writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
	       base + HZIP_CLOCK_GATE_CTRL);

	/* enable sqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
}

static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1) {
		writel(HZIP_CORE_INT_MASK_ALL,
		       qm->io_base + HZIP_CORE_INT_MASK_REG);
		dev_info(&qm->pdev->dev, "Does not support hw error handling\n");
		return;
	}

	/* clear ZIP hw error source if it exists */
	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);

	/* configure error type */
	writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
	writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
	writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* enable ZIP hw error interrupts */
	writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
}

static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
	/* disable ZIP hw error interrupts */
	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
}

static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
{
	struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;

	return &hisi_zip->qm;
}

static u32 current_qm_read(struct ctrl_debug_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

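/*
 * Select which function the QM DFX counter registers report on: writing 0
 * selects the PF itself, 1..num_vfs selects a VF. curr_qm_qp_num is set to
 * that function's queue count, assuming the queues left after the PF's
 * share are split evenly across the VFs with the remainder on the last VF.
 * Example (hypothetical numbers): 1024 queues total, 64 for the PF, 7 VFs
 * -> VF1..VF6 get 137 queues each, VF7 gets 138.
 */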
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	struct hisi_zip_ctrl *ctrl = file->ctrl;
	u32 vfq_num;
	u32 tmp;

	if (val > ctrl->num_vfs)
		return -EINVAL;

	/* Calculate curr_qm_qp_num and store */
	if (val == 0) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs;
		if (val == ctrl->num_vfs)
			qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
				qm->qp_num - (ctrl->num_vfs - 1) * vfq_num;
		else
			qm->debug.curr_qm_qp_num = vfq_num;
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 clear_enable_read(struct ctrl_debug_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
	       SOFT_CTRL_CNT_CLR_CE_BIT;
}

static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
	       ~SOFT_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	char tbuf[HZIP_BUF_SIZE];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CURRENT_QM:
		val = current_qm_read(file);
		break;
	case HZIP_CLEAR_ENABLE:
		val = clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}
	spin_unlock_irq(&file->lock);
	ret = sprintf(tbuf, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	char tbuf[HZIP_BUF_SIZE];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HZIP_BUF_SIZE)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CURRENT_QM:
		ret = current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HZIP_CLEAR_ENABLE:
		ret = clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ctrl_debug_read,
	.write = ctrl_debug_write,
};

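/*
 * Create one debugfs directory per compression/decompression core
 * (comp_core<N>, decomp_core<N>), each exposing a read-only "regs" dump of
 * the per-core DFX registers at that core's MMIO offset.
 */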
static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
{
	struct hisi_zip *hisi_zip = ctrl->hisi_zip;
	struct hisi_qm *qm = &hisi_zip->qm;
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	char buf[HZIP_BUF_SIZE];
	int i;

	for (i = 0; i < HZIP_CORE_NUM; i++) {
		if (i < HZIP_COMP_CORE_NUM)
			sprintf(buf, "comp_core%d", i);
		else
			sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hzip_dfx_regs;
		regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
		regset->base = qm->io_base + core_offsets[i];

		tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
		debugfs_create_regset32("regs", 0444, tmp_d, regset);
	}

	return 0;
}

static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
	int i;

	for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
		spin_lock_init(&ctrl->files[i].lock);
		ctrl->files[i].ctrl = ctrl;
		ctrl->files[i].index = i;

		debugfs_create_file(ctrl_debug_file_name[i], 0600,
				    ctrl->debug_root, ctrl->files + i,
				    &ctrl_debug_fops);
	}

	return hisi_zip_core_debug_init(ctrl);
}

static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct device *dev = &qm->pdev->dev;
	struct dentry *dev_d;
	int ret;

	dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);

	qm->debug.debug_root = dev_d;
	ret = hisi_qm_debug_init(qm);
	if (ret)
		goto failed_to_create;

	if (qm->fun_type == QM_HW_PF) {
		hisi_zip->ctrl->debug_root = dev_d;
		ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
		if (ret)
			goto failed_to_create;
	}

	return 0;

failed_to_create:
	debugfs_remove_recursive(hzip_debugfs_root);
	return ret;
}

static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;

	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
	writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;

	debugfs_remove_recursive(qm->debug.debug_root);

	if (qm->fun_type == QM_HW_PF)
		hisi_zip_debug_regs_clear(hisi_zip);
}

static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hisi_zip_hw_error *err = zip_hw_error;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (err->msg) {
		if (err->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				err->msg, err->int_msk);

			if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
				err_val = readl(qm->io_base +
						HZIP_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
					((err_val >>
					  HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
				dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n",
					(err_val >>
					 HZIP_SRAM_ECC_ERR_ADDR_SHIFT));
			}
		}
		err++;
	}

	writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}

static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}

static const struct hisi_qm_err_ini hisi_zip_err_ini = {
	.hw_err_enable = hisi_zip_hw_error_enable,
	.hw_err_disable = hisi_zip_hw_error_disable,
	.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
	.log_dev_hw_err = hisi_zip_log_hw_error,
	.err_info = {
		.ce = QM_BASE_CE,
		.nfe = QM_BASE_NFE |
		       QM_ACC_WB_NOT_READY_TIMEOUT,
		.fe = 0,
		.msi = QM_DB_RANDOM_INVALID,
	}
};

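/*
 * PF-only probe setup: allocate the controller structure, derive the total
 * queue count from the hardware revision, hook up the error-reporting ops,
 * and bring the user domain, caches, and debug registers to a known state.
 */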
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct hisi_zip_ctrl *ctrl;

	ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	hisi_zip->ctrl = ctrl;
	ctrl->hisi_zip = hisi_zip;

	switch (qm->ver) {
	case QM_HW_V1:
		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
		break;

	case QM_HW_V2:
		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
		break;

	default:
		return -EINVAL;
	}

	qm->err_ini = &hisi_zip_err_ini;

	hisi_zip_set_user_domain_and_cache(hisi_zip);
	hisi_qm_dev_err_init(qm);
	hisi_zip_debug_regs_clear(hisi_zip);

	return 0;
}

/* Currently we only support equal assignment */
static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	u32 qp_num = qm->qp_num;
	u32 q_base = qp_num;
	u32 q_num, remain_q_num, i;
	int ret;

	if (!num_vfs)
		return -EINVAL;

	remain_q_num = qm->ctrl_qp_num - qp_num;
	if (remain_q_num < num_vfs)
		return -EINVAL;

	q_num = remain_q_num / num_vfs;
	for (i = 1; i <= num_vfs; i++) {
		/* the last VF also takes any remainder queues */
		if (i == num_vfs)
			q_num += remain_q_num % num_vfs;
		ret = hisi_qm_set_vft(qm, i, q_base, q_num);
		if (ret)
			return ret;
		q_base += q_num;
	}

	return 0;
}

static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
{
	struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
	struct hisi_qm *qm = &hisi_zip->qm;
	u32 i, num_vfs = ctrl->num_vfs;
	int ret;

	for (i = 1; i <= num_vfs; i++) {
		ret = hisi_qm_set_vft(qm, i, 0, 0);
		if (ret)
			return ret;
	}

	ctrl->num_vfs = 0;

	return 0;
}

static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
	int pre_existing_vfs, num_vfs, ret;

	pre_existing_vfs = pci_num_vf(pdev);

	if (pre_existing_vfs) {
		dev_err(&pdev->dev,
			"Can't enable VF. Please disable pre-enabled VFs!\n");
		return 0;
	}

	num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);

	ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
	if (ret) {
		dev_err(&pdev->dev, "Can't assign queues for VF!\n");
		return ret;
	}

	hisi_zip->ctrl->num_vfs = num_vfs;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		dev_err(&pdev->dev, "Can't enable VF!\n");
		hisi_zip_clear_vft_config(hisi_zip);
		return ret;
	}

	return num_vfs;
}

static int hisi_zip_sriov_disable(struct pci_dev *pdev)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Can't disable VFs while VFs are assigned!\n");
		return -EPERM;
	}

	/*
	 * Disabling SR-IOV triggers remove() of hisi_zip_pci_driver for
	 * each VF, which frees the VF resources.
	 */
	pci_disable_sriov(pdev);

	return hisi_zip_clear_vft_config(hisi_zip);
}

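/*
 * Common probe path for PF and VF: initialize the QM, start the queues,
 * create the debugfs hierarchy, register the device for NUMA-aware lookup,
 * and optionally enable SR-IOV on the PF when vfs_num is set.
 */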
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_zip *hisi_zip;
	enum qm_hw_ver rev_id;
	struct hisi_qm *qm;
	int ret;

	rev_id = hisi_qm_get_hw_version(pdev);
	if (rev_id == QM_HW_UNKNOWN)
		return -EINVAL;

	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
	if (!hisi_zip)
		return -ENOMEM;
	pci_set_drvdata(pdev, hisi_zip);

	qm = &hisi_zip->qm;
	qm->pdev = pdev;
	qm->ver = rev_id;

	qm->sqe_size = HZIP_SQE_SIZE;
	qm->dev_name = hisi_zip_name;
	qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
								QM_HW_VF;
	switch (uacce_mode) {
	case 0:
		qm->use_dma_api = true;
		break;
	case 1:
		qm->use_dma_api = false;
		break;
	case 2:
		qm->use_dma_api = true;
		break;
	default:
		return -EINVAL;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init qm!\n");
		return ret;
	}

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_pf_probe_init(hisi_zip);
		if (ret)
			return ret;

		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
	} else if (qm->fun_type == QM_HW_VF) {
		/*
		 * On v1 hardware a VF has no way to get the qm configuration
		 * from within a VM, so force the PF to use HZIP_PF_DEF_Q_NUM
		 * and support only one VF there.
		 *
		 * v2 hardware has no such problem.
		 */
		if (qm->ver == QM_HW_V1) {
			qm->qp_base = HZIP_PF_DEF_Q_NUM;
			qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
		} else if (qm->ver == QM_HW_V2) {
			/* v2 starts to support getting the vft by mailbox */
			hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		}
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_qm_uninit;

	ret = hisi_zip_debugfs_init(hisi_zip);
	if (ret)
		dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);

	hisi_zip_add_to_list(hisi_zip);

	if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
		ret = hisi_zip_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_remove_from_list;
	}

	return 0;

err_remove_from_list:
	hisi_zip_remove_from_list(hisi_zip);
	hisi_zip_debugfs_exit(hisi_zip);
	hisi_qm_stop(qm);
err_qm_uninit:
	hisi_qm_uninit(qm);
	return ret;
}

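/*
 * Backend for the PCI core's sriov_numvfs sysfs attribute: writing 0
 * disables all VFs, any other value enables that many (capped at
 * HZIP_VF_NUM). Example runtime usage (hypothetical BDF):
 *
 *	echo 3 > /sys/bus/pci/devices/0000:75:00.0/sriov_numvfs
 */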
static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_zip_sriov_disable(pdev);
	else
		return hisi_zip_sriov_enable(pdev, num_vfs);
}

static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &hisi_zip->qm;

	if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
		hisi_zip_sriov_disable(pdev);

	hisi_zip_debugfs_exit(hisi_zip);
	hisi_qm_stop(qm);

	hisi_qm_dev_err_uninit(qm);
	hisi_qm_uninit(qm);
	hisi_zip_remove_from_list(hisi_zip);
}

static const struct pci_error_handlers hisi_zip_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
};

static struct pci_driver hisi_zip_pci_driver = {
	.name = "hisi_zip",
	.id_table = hisi_zip_dev_ids,
	.probe = hisi_zip_probe,
	.remove = hisi_zip_remove,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
				hisi_zip_sriov_configure : NULL,
	.err_handler = &hisi_zip_err_handler,
};

static void hisi_zip_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
}

static void hisi_zip_unregister_debugfs(void)
{
	debugfs_remove_recursive(hzip_debugfs_root);
}

static int __init hisi_zip_init(void)
{
	int ret;

	hisi_zip_register_debugfs();

	ret = pci_register_driver(&hisi_zip_pci_driver);
	if (ret < 0) {
		pr_err("Failed to register pci driver.\n");
		goto err_pci;
	}

	if (uacce_mode == 0 || uacce_mode == 2) {
		ret = hisi_zip_register_to_crypto();
		if (ret < 0) {
			pr_err("Failed to register driver to crypto.\n");
			goto err_crypto;
		}
	}

	return 0;

err_crypto:
	pci_unregister_driver(&hisi_zip_pci_driver);
err_pci:
	hisi_zip_unregister_debugfs();

	return ret;
}

static void __exit hisi_zip_exit(void)
{
	if (uacce_mode == 0 || uacce_mode == 2)
		hisi_zip_unregister_from_crypto();
	pci_unregister_driver(&hisi_zip_pci_driver);
	hisi_zip_unregister_debugfs();
}

module_init(hisi_zip_init);
module_exit(hisi_zip_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");