// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "zip.h"

#define PCI_DEVICE_ID_HUAWEI_ZIP_PF	0xa250

#define HZIP_QUEUE_NUM_V1		4096

#define HZIP_CLOCK_GATE_CTRL		0x301004
#define COMP0_ENABLE			BIT(0)
#define COMP1_ENABLE			BIT(1)
#define DECOMP0_ENABLE			BIT(2)
#define DECOMP1_ENABLE			BIT(3)
#define DECOMP2_ENABLE			BIT(4)
#define DECOMP3_ENABLE			BIT(5)
#define DECOMP4_ENABLE			BIT(6)
#define DECOMP5_ENABLE			BIT(7)
#define HZIP_ALL_COMP_DECOMP_EN		(COMP0_ENABLE | COMP1_ENABLE | \
					 DECOMP0_ENABLE | DECOMP1_ENABLE | \
					 DECOMP2_ENABLE | DECOMP3_ENABLE | \
					 DECOMP4_ENABLE | DECOMP5_ENABLE)
#define HZIP_DECOMP_CHECK_ENABLE	BIT(16)
#define HZIP_FSM_MAX_CNT		0x301008

#define HZIP_PORT_ARCA_CHE_0		0x301040
#define HZIP_PORT_ARCA_CHE_1		0x301044
#define HZIP_PORT_AWCA_CHE_0		0x301060
#define HZIP_PORT_AWCA_CHE_1		0x301064
#define HZIP_CACHE_ALL_EN		0xffffffff

#define HZIP_BD_RUSER_32_63		0x301110
#define HZIP_SGL_RUSER_32_63		0x30111c
#define HZIP_DATA_RUSER_32_63		0x301128
#define HZIP_DATA_WUSER_32_63		0x301134
#define HZIP_BD_WUSER_32_63		0x301140

#define HZIP_QM_IDEL_STATUS		0x3040e4

#define HZIP_CORE_DFX_BASE		0x301000
#define HZIP_CLOCK_GATED_CONTL		0x301004
#define HZIP_CORE_DFX_COMP_0		0x302000
#define HZIP_CORE_DFX_COMP_1		0x303000
#define HZIP_CORE_DFX_DECOMP_0		0x304000
#define HZIP_CORE_DFX_DECOMP_1		0x305000
#define HZIP_CORE_DFX_DECOMP_2		0x306000
#define HZIP_CORE_DFX_DECOMP_3		0x307000
#define HZIP_CORE_DFX_DECOMP_4		0x308000
#define HZIP_CORE_DFX_DECOMP_5		0x309000
#define HZIP_CORE_REGS_BASE_LEN		0xB0
#define HZIP_CORE_REGS_DFX_LEN		0x28

#define HZIP_CORE_INT_SOURCE		0x3010A0
#define HZIP_CORE_INT_MASK_REG		0x3010A4
#define HZIP_CORE_INT_SET		0x3010A8
#define HZIP_CORE_INT_STATUS		0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC	BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO	0x301148
#define HZIP_CORE_INT_RAS_CE_ENB	0x301160
#define HZIP_CORE_INT_RAS_CE_ENABLE	0x1
#define HZIP_CORE_INT_RAS_NFE_ENB	0x301164
#define HZIP_CORE_INT_RAS_FE_ENB	0x301168
#define HZIP_OOO_SHUTDOWN_SEL		0x30120C
#define HZIP_CORE_INT_RAS_NFE_ENABLE	0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT	16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT	24
#define HZIP_CORE_INT_MASK_ALL		GENMASK(12, 0)
#define HZIP_COMP_CORE_NUM		2
#define HZIP_DECOMP_CORE_NUM		6
#define HZIP_CORE_NUM			(HZIP_COMP_CORE_NUM + \
					 HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE			128
#define HZIP_SQ_SIZE			(HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM		64
#define HZIP_PF_DEF_Q_BASE		0

#define HZIP_SOFT_CTRL_CNT_CLR_CE	0x301000
#define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HZIP_SOFT_CTRL_ZIP_CONTROL	0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE	BIT(14)
#define HZIP_WR_PORT			BIT(11)

#define HZIP_BUF_SIZE			22
#define HZIP_SQE_MASK_OFFSET		64
#define HZIP_SQE_MASK_LEN		48

#define HZIP_CNT_CLR_CE_EN		BIT(0)
#define HZIP_RO_CNT_CLR_CE_EN		BIT(2)
#define HZIP_RD_CNT_CLR_CE_EN		(HZIP_CNT_CLR_CE_EN | \
					 HZIP_RO_CNT_CLR_CE_EN)

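/* SVA prefetch control registers, used on QM_HW_V3 and later */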
#define HZIP_PREFETCH_CFG		0x3011B0
#define HZIP_SVA_TRANS			0x3011C4
#define HZIP_PREFETCH_ENABLE		(~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE	BIT(26)
#define HZIP_SVA_DISABLE_READY		(BIT(26) | BIT(30))
#define HZIP_SHAPER_RATE_COMPRESS	750
#define HZIP_SHAPER_RATE_DECOMPRESS	140
#define HZIP_DELAY_1_US			1
#define HZIP_POLL_TIMEOUT_US		1000

/* clock gating */
#define HZIP_PEH_CFG_AUTO_GATE		0x3011A8
#define HZIP_PEH_CFG_AUTO_GATE_EN	BIT(0)
#define HZIP_CORE_GATED_EN		GENMASK(15, 8)
#define HZIP_CORE_GATED_OOO_EN		BIT(29)
#define HZIP_CLOCK_GATED_EN		(HZIP_CORE_GATED_EN | \
					 HZIP_CORE_GATED_OOO_EN)

static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;

struct hisi_zip_hw_error {
	u32 int_msk;
	const char *msg;
};

struct zip_dfx_item {
	const char *name;
	u32 offset;
};

static struct hisi_qm_list zip_devices = {
	.register_to_crypto = hisi_zip_register_to_crypto,
	.unregister_from_crypto = hisi_zip_unregister_from_crypto,
};

static struct zip_dfx_item zip_dfx_files[] = {
	{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
	{"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
};

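/* ZIP core error bits, as reported in HZIP_CORE_INT_STATUS */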
static const struct hisi_zip_hw_error zip_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "zip_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
	{ .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
	{ .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
	{ .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
	{ .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
	{ .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
	{ .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
	{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
	{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
	{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
	{ .int_msk = BIT(11), .msg = "zip_axi_poison_err" },
	{ .int_msk = BIT(12), .msg = "zip_sva_err" },
	{ /* sentinel */ }
};

enum ctrl_debug_file_index {
	HZIP_CLEAR_ENABLE,
	HZIP_DEBUG_FILE_NUM,
};

static const char * const ctrl_debug_file_name[] = {
	[HZIP_CLEAR_ENABLE] = "clear_enable",
};

struct ctrl_debug_file {
	enum ctrl_debug_file_index index;
	spinlock_t lock;
	struct hisi_zip_ctrl *ctrl;
};

/*
 * One ZIP controller has one PF and multiple VFs. This structure holds the
 * global configuration that is owned by the PF only.
 */
struct hisi_zip_ctrl {
	struct hisi_zip *hisi_zip;
	struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};

enum {
	HZIP_COMP_CORE0,
	HZIP_COMP_CORE1,
	HZIP_DECOMP_CORE0,
	HZIP_DECOMP_CORE1,
	HZIP_DECOMP_CORE2,
	HZIP_DECOMP_CORE3,
	HZIP_DECOMP_CORE4,
	HZIP_DECOMP_CORE5,
};

static const u64 core_offsets[] = {
	[HZIP_COMP_CORE0] = 0x302000,
	[HZIP_COMP_CORE1] = 0x303000,
	[HZIP_DECOMP_CORE0] = 0x304000,
	[HZIP_DECOMP_CORE1] = 0x305000,
	[HZIP_DECOMP_CORE2] = 0x306000,
	[HZIP_DECOMP_CORE3] = 0x307000,
	[HZIP_DECOMP_CORE4] = 0x308000,
	[HZIP_DECOMP_CORE5] = 0x309000,
};

static const struct debugfs_reg32 hzip_dfx_regs[] = {
	{"HZIP_GET_BD_NUM ", 0x00ull},
	{"HZIP_GET_RIGHT_BD ", 0x04ull},
	{"HZIP_GET_ERROR_BD ", 0x08ull},
	{"HZIP_DONE_BD_NUM ", 0x0cull},
	{"HZIP_WORK_CYCLE ", 0x10ull},
	{"HZIP_IDLE_CYCLE ", 0x18ull},
	{"HZIP_MAX_DELAY ", 0x20ull},
	{"HZIP_MIN_DELAY ", 0x24ull},
	{"HZIP_AVG_DELAY ", 0x28ull},
	{"HZIP_MEM_VISIBLE_DATA ", 0x30ull},
	{"HZIP_MEM_VISIBLE_ADDR ", 0x34ull},
	{"HZIP_CONSUMED_BYTE ", 0x38ull},
	{"HZIP_PRODUCED_BYTE ", 0x40ull},
	{"HZIP_COMP_INF ", 0x70ull},
	{"HZIP_PRE_OUT ", 0x78ull},
	{"HZIP_BD_RD ", 0x7cull},
	{"HZIP_BD_WR ", 0x80ull},
	{"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull},
	{"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull},
	{"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull},
	{"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull},
	{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};

static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
	{"HZIP_CLOCK_GATE_CTRL ", 0x301004},
	{"HZIP_CORE_INT_RAS_CE_ENB ", 0x301160},
	{"HZIP_CORE_INT_RAS_NFE_ENB ", 0x301164},
	{"HZIP_CORE_INT_RAS_FE_ENB ", 0x301168},
	{"HZIP_UNCOM_ERR_RAS_CTRL ", 0x30116C},
};

static const struct debugfs_reg32 hzip_dump_dfx_regs[] = {
	{"HZIP_GET_BD_NUM ", 0x00ull},
	{"HZIP_GET_RIGHT_BD ", 0x04ull},
	{"HZIP_GET_ERROR_BD ", 0x08ull},
	{"HZIP_DONE_BD_NUM ", 0x0cull},
	{"HZIP_MAX_DELAY ", 0x20ull},
};

/* define the ZIP's dfx regs region and region length */
static struct dfx_diff_registers hzip_diff_regs[] = {
	{
		.reg_offset = HZIP_CORE_DFX_BASE,
		.reg_len = HZIP_CORE_REGS_BASE_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_COMP_0,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_COMP_1,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_0,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_1,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_2,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_3,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_4,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_5,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	},
};

static int hzip_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(hzip_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs);

static const struct kernel_param_ops zip_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means ZIP is registered to crypto only,
 * uacce_mode = 1 means ZIP is registered to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 2-4096, v2 2-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");

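/*
 * Example (hypothetical values): load the PF driver with 256 queues on the
 * PF, 4 VFs, and the uacce interface enabled:
 *
 *   modprobe hisi_zip uacce_mode=1 pf_q_num=256 vfs_num=4
 */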
static const struct pci_device_id hisi_zip_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);

int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
{
	if (node == NUMA_NO_NODE)
		node = cpu_to_node(smp_processor_id());

	return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}

static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
	val &= HZIP_PREFETCH_ENABLE;
	writel(val, qm->io_base + HZIP_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
					 val, !(val & HZIP_SVA_PREFETCH_DISABLE),
					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
	val |= HZIP_SVA_PREFETCH_DISABLE;
	writel(val, qm->io_base + HZIP_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
					 val, !(val & HZIP_SVA_DISABLE_READY),
					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}

static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL);
	val |= HZIP_CLOCK_GATED_EN;
	writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL);

	val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
	val |= HZIP_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
}

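/*
 * Configure the QM and ZIP AXI user domain and caches; also registered
 * below as the hw_init callback in hisi_zip_err_ini.
 */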
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
	void __iomem *base = qm->io_base;

	/* qm user domain */
	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);

	/* cache */
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);

	/* user domain configurations */
	writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);

	if (qm->use_sva && qm->ver == QM_HW_V2) {
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_SGL_RUSER_32_63);
	} else {
		writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
	}

	/* open all compression/decompression cores */
	writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
	       base + HZIP_CLOCK_GATE_CTRL);

	/* enable sqc, cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);

	hisi_zip_enable_clock_gate(qm);

	return 0;
}

static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
	if (enable) {
		val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
		val2 = HZIP_CORE_INT_RAS_NFE_ENABLE;
	} else {
		val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}

static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1) {
		writel(HZIP_CORE_INT_MASK_ALL,
		       qm->io_base + HZIP_CORE_INT_MASK_REG);
		dev_info(&qm->pdev->dev, "Does not support hw error handling\n");
		return;
	}

	/* clear ZIP hw error source if any */
	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);

	/* configure error type */
	writel(HZIP_CORE_INT_RAS_CE_ENABLE,
	       qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
	writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
	writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* enable ZIP block master OOO when nfe occurs on Kunpeng930 */
	hisi_zip_master_ooo_ctrl(qm, true);

	/* enable ZIP hw error interrupts */
	writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
}

static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
	/* disable ZIP hw error interrupts */
	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);

	/* disable ZIP block master OOO when nfe occurs on Kunpeng930 */
	hisi_zip_master_ooo_ctrl(qm, false);
}

static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
{
	struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;

	return &hisi_zip->qm;
}

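/*
 * The "clear_enable" debugfs file below toggles
 * HZIP_SOFT_CTRL_CNT_CLR_CE_BIT which, as used by
 * hisi_zip_debug_regs_clear(), makes the DFX counter registers clear
 * when they are read.
 */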
static u32 clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
		     HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
}

static int clear_enable_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
	       ~HZIP_SOFT_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
					size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[HZIP_BUF_SIZE];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CLEAR_ENABLE:
		val = clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
					 const char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[HZIP_BUF_SIZE];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HZIP_BUF_SIZE)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CLEAR_ENABLE:
		ret = clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hisi_zip_ctrl_debug_read,
	.write = hisi_zip_ctrl_debug_write,
};

static int zip_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

static int zip_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
			 zip_debugfs_atomic64_set, "%llu\n");

static int hisi_zip_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);

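/*
 * Create one debugfs directory per compression/decompression core, each
 * containing a read-only "regs" dump of that core's DFX registers.
 */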
static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	char buf[HZIP_BUF_SIZE];
	int i;

	for (i = 0; i < HZIP_CORE_NUM; i++) {
		if (i < HZIP_COMP_CORE_NUM)
			scnprintf(buf, sizeof(buf), "comp_core%d", i);
		else
			scnprintf(buf, sizeof(buf), "decomp_core%d",
				  i - HZIP_COMP_CORE_NUM);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hzip_dfx_regs;
		regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
		regset->base = qm->io_base + core_offsets[i];
		regset->dev = dev;

		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
		debugfs_create_file("regs", 0444, tmp_d, regset,
				    &hisi_zip_regs_fops);
	}

	return 0;
}

static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
	struct hisi_zip_dfx *dfx = &zip->dfx;
	struct dentry *tmp_dir;
	void *data;
	int i;

	tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
	for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
		debugfs_create_file(zip_dfx_files[i].name,
				    0644, tmp_dir, data,
				    &zip_atomic64_ops);
	}

	if (qm->fun_type == QM_HW_PF && hzip_regs)
		debugfs_create_file("diff_regs", 0444, tmp_dir,
				    qm, &hzip_diff_regs_fops);
}

static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
{
	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
	int i;

	for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) {
		spin_lock_init(&zip->ctrl->files[i].lock);
		zip->ctrl->files[i].ctrl = zip->ctrl;
		zip->ctrl->files[i].index = i;

		debugfs_create_file(ctrl_debug_file_name[i], 0600,
				    qm->debug.debug_root,
				    zip->ctrl->files + i,
				    &ctrl_debug_fops);
	}

	return hisi_zip_core_debug_init(qm);
}

static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct dentry *dev_d;
	int ret;

	dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);

	qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
	qm->debug.debug_root = dev_d;
	ret = hisi_qm_diff_regs_init(qm, hzip_diff_regs,
				     ARRAY_SIZE(hzip_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init ZIP diff regs!\n");
		goto debugfs_remove;
	}

	hisi_qm_debug_init(qm);

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_ctrl_debug_init(qm);
		if (ret)
			goto failed_to_create;
	}

	hisi_zip_dfx_debug_init(qm);

	return 0;

failed_to_create:
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
debugfs_remove:
	debugfs_remove_recursive(hzip_debugfs_root);
	return ret;
}

/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
	int i, j;

	/* enable register read_clear bit */
	writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
		for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++)
			readl(qm->io_base + core_offsets[i] +
			      hzip_dfx_regs[j].offset);

	/* disable register read_clear bit */
	writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));

	debugfs_remove_recursive(qm->debug.debug_root);

	if (qm->fun_type == QM_HW_PF) {
		hisi_zip_debug_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
	}
}

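/*
 * Snapshot the common and per-core DFX registers at probe time so that
 * hisi_zip_show_last_dfx_regs() can report which of them changed across a
 * controller reset.
 */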
static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
{
	int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	int i, j, idx;

	debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM +
				    com_dfx_regs_num, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < com_dfx_regs_num; i++) {
		io_base = qm->io_base + hzip_com_dfx_regs[i].offset;
		debug->last_words[i] = readl_relaxed(io_base);
	}

	for (i = 0; i < HZIP_CORE_NUM; i++) {
		io_base = qm->io_base + core_offsets[i];
		for (j = 0; j < core_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
				io_base + hzip_dump_dfx_regs[j].offset);
		}
	}

	return 0;
}

static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
{
	int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	char buf[HZIP_BUF_SIZE];
	void __iomem *base;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
				 hzip_com_dfx_regs[i].name,
				 debug->last_words[i], val);
	}

	for (i = 0; i < HZIP_CORE_NUM; i++) {
		if (i < HZIP_COMP_CORE_NUM)
			scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
		else
			scnprintf(buf, sizeof(buf), "Decomp_core-%d",
				  i - HZIP_COMP_CORE_NUM);
		base = qm->io_base + core_offsets[i];

		pci_info(qm->pdev, "==>%s:\n", buf);
		/* dump last word for dfx regs during control resetting */
		for (j = 0; j < core_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
			val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
			if (debug->last_words[idx] != val)
				pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
					 hzip_dump_dfx_regs[j].name,
					 debug->last_words[idx], val);
		}
	}
}

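/*
 * Log every asserted bit of the error status; for a multi-bit ECC error,
 * also dump the SRAM ECC error info register.
 */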
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hisi_zip_hw_error *err = zip_hw_error;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (err->msg) {
		if (err->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				err->msg, err->int_msk);

			if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
				err_val = readl(qm->io_base +
						HZIP_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
					((err_val >>
					  HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
			}
		}
		err++;
	}
}

static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}

static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}

static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}

static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
{
	u32 nfe_enb;

	/* Disable ECC Mbit error report. */
	nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
	writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* Inject zip ECC Mbit error to block master ooo. */
	writel(HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_SET);
}

static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->ce = QM_BASE_CE;
	err_info->fe = 0;
	err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
	err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
	err_info->msi_wr_port = HZIP_WR_PORT;
	err_info->acpi_rst = "ZRST";
	err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;

	if (qm->ver >= QM_HW_V3)
		err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
}

static const struct hisi_qm_err_ini hisi_zip_err_ini = {
	.hw_init = hisi_zip_set_user_domain_and_cache,
	.hw_err_enable = hisi_zip_hw_error_enable,
	.hw_err_disable = hisi_zip_hw_error_disable,
	.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
	.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
	.log_dev_hw_err = hisi_zip_log_hw_error,
	.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
	.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
	.open_sva_prefetch = hisi_zip_open_sva_prefetch,
	.close_sva_prefetch = hisi_zip_close_sva_prefetch,
	.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
	.err_info_init = hisi_zip_err_info_init,
};

static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct hisi_zip_ctrl *ctrl;
	int ret;

	ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	hisi_zip->ctrl = ctrl;
	ctrl->hisi_zip = hisi_zip;
	qm->err_ini = &hisi_zip_err_ini;
	qm->err_ini->err_info_init(qm);

	hisi_zip_set_user_domain_and_cache(qm);
	hisi_zip_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	hisi_zip_debug_regs_clear(qm);

	ret = hisi_zip_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

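/*
 * Fill in the QM description from the PCI device and the module
 * parameters, then initialize the QM itself.
 */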
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	if (pdev->revision >= QM_HW_V3)
		qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd";
	else
		qm->algs = "zlib\ngzip";
	qm->mode = uacce_mode;
	qm->sqe_size = HZIP_SQE_SIZE;
	qm->dev_name = hisi_zip_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_ZIP_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &zip_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * A VF has no way to read the qm configuration on v1
		 * hardware, so the PF is currently forced to use
		 * HZIP_PF_DEF_Q_NUM and to bring up only one VF on v1
		 * hardware.
		 *
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = HZIP_PF_DEF_Q_NUM;
		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
	}

	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);

	return ret;
}

static void hisi_zip_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
	destroy_workqueue(qm->wq);
}

static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
	u32 type_rate = HZIP_SHAPER_RATE_COMPRESS;
	struct hisi_qm *qm = &hisi_zip->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_pf_probe_init(hisi_zip);
		if (ret)
			return ret;
		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;

			/* ZIP also needs to enable shaper type 1 */
			type_rate |= HZIP_SHAPER_RATE_DECOMPRESS << QM_SHAPER_TYPE1_OFFSET;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_zip *hisi_zip;
	struct hisi_qm *qm;
	int ret;

	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
	if (!hisi_zip)
		return -ENOMEM;

	qm = &hisi_zip->qm;

	ret = hisi_zip_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
		return ret;
	}

	ret = hisi_zip_probe_init(hisi_zip);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_dev_err_uninit;

	ret = hisi_zip_debugfs_init(qm);
	if (ret)
		pci_err(pdev, "failed to init debugfs (%d)!\n", ret);

	ret = hisi_qm_alg_register(qm, &zip_devices);
	if (ret < 0) {
		pci_err(pdev, "failed to register driver to crypto!\n");
		goto err_qm_stop;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_qm_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_qm_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_qm_alg_unregister:
	hisi_qm_alg_unregister(qm, &zip_devices);

err_qm_stop:
	hisi_zip_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_dev_err_uninit:
	hisi_zip_show_last_regs_uninit(qm);
	hisi_qm_dev_err_uninit(qm);

err_qm_uninit:
	hisi_zip_qm_uninit(qm);

	return ret;
}

static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &zip_devices);
	hisi_qm_alg_unregister(qm, &zip_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hisi_zip_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
	hisi_zip_show_last_regs_uninit(qm);
	hisi_qm_dev_err_uninit(qm);
	hisi_zip_qm_uninit(qm);
}

static const struct dev_pm_ops hisi_zip_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

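/* PCI error recovery and reset hooks are delegated to the common QM layer. */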
}; 1162 1163 static const struct pci_error_handlers hisi_zip_err_handler = { 1164 .error_detected = hisi_qm_dev_err_detected, 1165 .slot_reset = hisi_qm_dev_slot_reset, 1166 .reset_prepare = hisi_qm_reset_prepare, 1167 .reset_done = hisi_qm_reset_done, 1168 }; 1169 1170 static struct pci_driver hisi_zip_pci_driver = { 1171 .name = "hisi_zip", 1172 .id_table = hisi_zip_dev_ids, 1173 .probe = hisi_zip_probe, 1174 .remove = hisi_zip_remove, 1175 .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? 1176 hisi_qm_sriov_configure : NULL, 1177 .err_handler = &hisi_zip_err_handler, 1178 .shutdown = hisi_qm_dev_shutdown, 1179 .driver.pm = &hisi_zip_pm_ops, 1180 }; 1181 1182 struct pci_driver *hisi_zip_get_pf_driver(void) 1183 { 1184 return &hisi_zip_pci_driver; 1185 } 1186 EXPORT_SYMBOL_GPL(hisi_zip_get_pf_driver); 1187 1188 static void hisi_zip_register_debugfs(void) 1189 { 1190 if (!debugfs_initialized()) 1191 return; 1192 1193 hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL); 1194 } 1195 1196 static void hisi_zip_unregister_debugfs(void) 1197 { 1198 debugfs_remove_recursive(hzip_debugfs_root); 1199 } 1200 1201 static int __init hisi_zip_init(void) 1202 { 1203 int ret; 1204 1205 hisi_qm_init_list(&zip_devices); 1206 hisi_zip_register_debugfs(); 1207 1208 ret = pci_register_driver(&hisi_zip_pci_driver); 1209 if (ret < 0) { 1210 hisi_zip_unregister_debugfs(); 1211 pr_err("Failed to register pci driver.\n"); 1212 } 1213 1214 return ret; 1215 } 1216 1217 static void __exit hisi_zip_exit(void) 1218 { 1219 pci_unregister_driver(&hisi_zip_pci_driver); 1220 hisi_zip_unregister_debugfs(); 1221 } 1222 1223 module_init(hisi_zip_init); 1224 module_exit(hisi_zip_exit); 1225 1226 MODULE_LICENSE("GPL v2"); 1227 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); 1228 MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator"); 1229