1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2019 HiSilicon Limited. */ 3 #include <asm/page.h> 4 #include <linux/acpi.h> 5 #include <linux/aer.h> 6 #include <linux/bitmap.h> 7 #include <linux/debugfs.h> 8 #include <linux/dma-mapping.h> 9 #include <linux/idr.h> 10 #include <linux/io.h> 11 #include <linux/irqreturn.h> 12 #include <linux/log2.h> 13 #include <linux/seq_file.h> 14 #include <linux/slab.h> 15 #include <linux/uacce.h> 16 #include <linux/uaccess.h> 17 #include <uapi/misc/uacce/hisi_qm.h> 18 #include "qm.h" 19 20 /* eq/aeq irq enable */ 21 #define QM_VF_AEQ_INT_SOURCE 0x0 22 #define QM_VF_AEQ_INT_MASK 0x4 23 #define QM_VF_EQ_INT_SOURCE 0x8 24 #define QM_VF_EQ_INT_MASK 0xc 25 #define QM_IRQ_NUM_V1 1 26 #define QM_IRQ_NUM_PF_V2 4 27 #define QM_IRQ_NUM_VF_V2 2 28 #define QM_IRQ_NUM_VF_V3 3 29 30 #define QM_EQ_EVENT_IRQ_VECTOR 0 31 #define QM_AEQ_EVENT_IRQ_VECTOR 1 32 #define QM_CMD_EVENT_IRQ_VECTOR 2 33 #define QM_ABNORMAL_EVENT_IRQ_VECTOR 3 34 35 /* mailbox */ 36 #define QM_MB_CMD_SQC 0x0 37 #define QM_MB_CMD_CQC 0x1 38 #define QM_MB_CMD_EQC 0x2 39 #define QM_MB_CMD_AEQC 0x3 40 #define QM_MB_CMD_SQC_BT 0x4 41 #define QM_MB_CMD_CQC_BT 0x5 42 #define QM_MB_CMD_SQC_VFT_V2 0x6 43 #define QM_MB_CMD_STOP_QP 0x8 44 #define QM_MB_CMD_SRC 0xc 45 #define QM_MB_CMD_DST 0xd 46 47 #define QM_MB_CMD_SEND_BASE 0x300 48 #define QM_MB_EVENT_SHIFT 8 49 #define QM_MB_BUSY_SHIFT 13 50 #define QM_MB_OP_SHIFT 14 51 #define QM_MB_CMD_DATA_ADDR_L 0x304 52 #define QM_MB_CMD_DATA_ADDR_H 0x308 53 #define QM_MB_PING_ALL_VFS 0xffff 54 #define QM_MB_CMD_DATA_SHIFT 32 55 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) 56 57 /* sqc shift */ 58 #define QM_SQ_HOP_NUM_SHIFT 0 59 #define QM_SQ_PAGE_SIZE_SHIFT 4 60 #define QM_SQ_BUF_SIZE_SHIFT 8 61 #define QM_SQ_SQE_SIZE_SHIFT 12 62 #define QM_SQ_PRIORITY_SHIFT 0 63 #define QM_SQ_ORDERS_SHIFT 4 64 #define QM_SQ_TYPE_SHIFT 8 65 #define QM_QC_PASID_ENABLE 0x1 66 #define QM_QC_PASID_ENABLE_SHIFT 7 67 68 #define QM_SQ_TYPE_MASK GENMASK(3, 0) 69 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1) 70 71 /* cqc shift */ 72 #define QM_CQ_HOP_NUM_SHIFT 0 73 #define QM_CQ_PAGE_SIZE_SHIFT 4 74 #define QM_CQ_BUF_SIZE_SHIFT 8 75 #define QM_CQ_CQE_SIZE_SHIFT 12 76 #define QM_CQ_PHASE_SHIFT 0 77 #define QM_CQ_FLAG_SHIFT 1 78 79 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) 80 #define QM_QC_CQE_SIZE 4 81 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1) 82 83 /* eqc shift */ 84 #define QM_EQE_AEQE_SIZE (2UL << 12) 85 #define QM_EQC_PHASE_SHIFT 16 86 87 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1) 88 #define QM_EQE_CQN_MASK GENMASK(15, 0) 89 90 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) 91 #define QM_AEQE_TYPE_SHIFT 17 92 93 #define QM_DOORBELL_CMD_SQ 0 94 #define QM_DOORBELL_CMD_CQ 1 95 #define QM_DOORBELL_CMD_EQ 2 96 #define QM_DOORBELL_CMD_AEQ 3 97 98 #define QM_DOORBELL_BASE_V1 0x340 99 #define QM_DB_CMD_SHIFT_V1 16 100 #define QM_DB_INDEX_SHIFT_V1 32 101 #define QM_DB_PRIORITY_SHIFT_V1 48 102 #define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 103 #define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 104 #define QM_QUE_ISO_CFG_V 0x0030 105 #define QM_PAGE_SIZE 0x0034 106 #define QM_QUE_ISO_EN 0x100154 107 #define QM_CAPBILITY 0x100158 108 #define QM_QP_NUN_MASK GENMASK(10, 0) 109 #define QM_QP_DB_INTERVAL 0x10000 110 #define QM_QP_MAX_NUM_SHIFT 11 111 #define QM_DB_CMD_SHIFT_V2 12 112 #define QM_DB_RAND_SHIFT_V2 16 113 #define QM_DB_INDEX_SHIFT_V2 32 114 #define QM_DB_PRIORITY_SHIFT_V2 48 115 116 #define 
QM_MEM_START_INIT 0x100040 117 #define QM_MEM_INIT_DONE 0x100044 118 #define QM_VFT_CFG_RDY 0x10006c 119 #define QM_VFT_CFG_OP_WR 0x100058 120 #define QM_VFT_CFG_TYPE 0x10005c 121 #define QM_SQC_VFT 0x0 122 #define QM_CQC_VFT 0x1 123 #define QM_VFT_CFG 0x100060 124 #define QM_VFT_CFG_OP_ENABLE 0x100054 125 126 #define QM_VFT_CFG_DATA_L 0x100064 127 #define QM_VFT_CFG_DATA_H 0x100068 128 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8) 129 #define QM_SQC_VFT_SQC_SIZE (5ULL << 12) 130 #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16) 131 #define QM_SQC_VFT_START_SQN_SHIFT 28 132 #define QM_SQC_VFT_VALID (1ULL << 44) 133 #define QM_SQC_VFT_SQN_SHIFT 45 134 #define QM_CQC_VFT_BUF_SIZE (7ULL << 8) 135 #define QM_CQC_VFT_SQC_SIZE (5ULL << 12) 136 #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16) 137 #define QM_CQC_VFT_VALID (1ULL << 28) 138 139 #define QM_SQC_VFT_BASE_SHIFT_V2 28 140 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) 141 #define QM_SQC_VFT_NUM_SHIFT_V2 45 142 #define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0) 143 144 #define QM_DFX_CNT_CLR_CE 0x100118 145 146 #define QM_ABNORMAL_INT_SOURCE 0x100000 147 #define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0) 148 #define QM_ABNORMAL_INT_MASK 0x100004 149 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff 150 #define QM_ABNORMAL_INT_STATUS 0x100008 151 #define QM_ABNORMAL_INT_SET 0x10000c 152 #define QM_ABNORMAL_INF00 0x100010 153 #define QM_FIFO_OVERFLOW_TYPE 0xc0 154 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 155 #define QM_FIFO_OVERFLOW_VF 0x3f 156 #define QM_ABNORMAL_INF01 0x100014 157 #define QM_DB_TIMEOUT_TYPE 0xc0 158 #define QM_DB_TIMEOUT_TYPE_SHIFT 6 159 #define QM_DB_TIMEOUT_VF 0x3f 160 #define QM_RAS_CE_ENABLE 0x1000ec 161 #define QM_RAS_FE_ENABLE 0x1000f0 162 #define QM_RAS_NFE_ENABLE 0x1000f4 163 #define QM_RAS_CE_THRESHOLD 0x1000f8 164 #define QM_RAS_CE_TIMES_PER_IRQ 1 165 #define QM_RAS_MSI_INT_SEL 0x1040f4 166 #define QM_OOO_SHUTDOWN_SEL 0x1040f8 167 168 #define QM_RESET_WAIT_TIMEOUT 400 169 #define QM_PEH_VENDOR_ID 0x1000d8 170 #define ACC_VENDOR_ID_VALUE 0x5a5a 171 #define QM_PEH_DFX_INFO0 0x1000fc 172 #define QM_PEH_DFX_INFO1 0x100100 173 #define QM_PEH_DFX_MASK (BIT(0) | BIT(2)) 174 #define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16) 175 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3 176 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0) 177 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 178 #define ACC_MASTER_TRANS_RETURN_RW 3 179 #define ACC_MASTER_TRANS_RETURN 0x300150 180 #define ACC_MASTER_GLOBAL_CTRL 0x300000 181 #define ACC_AM_CFG_PORT_WR_EN 0x30001c 182 #define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT 183 #define ACC_AM_ROB_ECC_INT_STS 0x300104 184 #define ACC_ROB_ECC_ERR_MULTPL BIT(1) 185 #define QM_MSI_CAP_ENABLE BIT(16) 186 187 /* interfunction communication */ 188 #define QM_IFC_READY_STATUS 0x100128 189 #define QM_IFC_C_STS_M 0x10012C 190 #define QM_IFC_INT_SET_P 0x100130 191 #define QM_IFC_INT_CFG 0x100134 192 #define QM_IFC_INT_SOURCE_P 0x100138 193 #define QM_IFC_INT_SOURCE_V 0x0020 194 #define QM_IFC_INT_MASK 0x0024 195 #define QM_IFC_INT_STATUS 0x0028 196 #define QM_IFC_INT_SET_V 0x002C 197 #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0) 198 #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0) 199 #define QM_IFC_INT_SOURCE_MASK BIT(0) 200 #define QM_IFC_INT_DISABLE BIT(0) 201 #define QM_IFC_INT_STATUS_MASK BIT(0) 202 #define QM_IFC_INT_SET_MASK BIT(0) 203 #define QM_WAIT_DST_ACK 10 204 #define QM_MAX_PF_WAIT_COUNT 10 205 #define QM_MAX_VF_WAIT_COUNT 40 206 #define QM_VF_RESET_WAIT_US 20000 207 #define QM_VF_RESET_WAIT_CNT 3000 208 #define QM_VF_RESET_WAIT_TIMEOUT_US \ 209 
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT) 210 211 #define QM_DFX_MB_CNT_VF 0x104010 212 #define QM_DFX_DB_CNT_VF 0x104020 213 #define QM_DFX_SQE_CNT_VF_SQN 0x104030 214 #define QM_DFX_CQE_CNT_VF_CQN 0x104040 215 #define QM_DFX_QN_SHIFT 16 216 #define CURRENT_FUN_MASK GENMASK(5, 0) 217 #define CURRENT_Q_MASK GENMASK(31, 16) 218 219 #define POLL_PERIOD 10 220 #define POLL_TIMEOUT 1000 221 #define WAIT_PERIOD_US_MAX 200 222 #define WAIT_PERIOD_US_MIN 100 223 #define MAX_WAIT_COUNTS 1000 224 #define QM_CACHE_WB_START 0x204 225 #define QM_CACHE_WB_DONE 0x208 226 227 #define PCI_BAR_2 2 228 #define PCI_BAR_4 4 229 #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) 230 #define QMC_ALIGN(sz) ALIGN(sz, 32) 231 232 #define QM_DBG_READ_LEN 256 233 #define QM_DBG_WRITE_LEN 1024 234 #define QM_DBG_TMP_BUF_LEN 22 235 #define QM_PCI_COMMAND_INVALID ~0 236 237 #define WAIT_PERIOD 20 238 #define REMOVE_WAIT_DELAY 10 239 #define QM_SQE_ADDR_MASK GENMASK(7, 0) 240 #define QM_EQ_DEPTH (1024 * 2) 241 242 #define QM_DRIVER_REMOVING 0 243 #define QM_RST_SCHED 1 244 #define QM_RESETTING 2 245 #define QM_QOS_PARAM_NUM 2 246 #define QM_QOS_VAL_NUM 1 247 #define QM_QOS_BDF_PARAM_NUM 4 248 #define QM_QOS_MAX_VAL 1000 249 #define QM_QOS_RATE 100 250 #define QM_QOS_EXPAND_RATE 1000 251 #define QM_SHAPER_CIR_B_MASK GENMASK(7, 0) 252 #define QM_SHAPER_CIR_U_MASK GENMASK(10, 8) 253 #define QM_SHAPER_CIR_S_MASK GENMASK(14, 11) 254 #define QM_SHAPER_FACTOR_CIR_U_SHIFT 8 255 #define QM_SHAPER_FACTOR_CIR_S_SHIFT 11 256 #define QM_SHAPER_FACTOR_CBS_B_SHIFT 15 257 #define QM_SHAPER_FACTOR_CBS_S_SHIFT 19 258 #define QM_SHAPER_CBS_B 1 259 #define QM_SHAPER_CBS_S 16 260 #define QM_SHAPER_VFT_OFFSET 6 261 #define WAIT_FOR_QOS_VF 100 262 #define QM_QOS_MIN_ERROR_RATE 5 263 #define QM_QOS_TYPICAL_NUM 8 264 #define QM_SHAPER_MIN_CBS_S 8 265 #define QM_QOS_TICK 0x300U 266 #define QM_QOS_DIVISOR_CLK 0x1f40U 267 #define QM_QOS_MAX_CIR_B 200 268 #define QM_QOS_MIN_CIR_B 100 269 #define QM_QOS_MAX_CIR_U 6 270 #define QM_QOS_MAX_CIR_S 11 271 #define QM_QOS_VAL_MAX_LEN 32 272 273 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ 274 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ 275 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ 276 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ 277 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 278 279 #define QM_MK_CQC_DW3_V2(cqe_sz) \ 280 ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 281 282 #define QM_MK_SQC_W13(priority, orders, alg_type) \ 283 (((priority) << QM_SQ_PRIORITY_SHIFT) | \ 284 ((orders) << QM_SQ_ORDERS_SHIFT) | \ 285 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT)) 286 287 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \ 288 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \ 289 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ 290 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ 291 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 292 293 #define QM_MK_SQC_DW3_V2(sqe_sz) \ 294 ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 295 296 #define INIT_QC_COMMON(qc, base, pasid) do { \ 297 (qc)->head = 0; \ 298 (qc)->tail = 0; \ 299 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \ 300 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \ 301 (qc)->dw3 = 0; \ 302 (qc)->w8 = 0; \ 303 (qc)->rsvd0 = 0; \ 304 (qc)->pasid = cpu_to_le16(pasid); \ 305 (qc)->w11 = 0; \ 306 (qc)->rsvd1 = 0; \ 307 } while (0) 308 309 enum vft_type { 310 SQC_VFT = 0, 311 CQC_VFT, 312 SHAPER_VFT, 313 }; 314 315 enum acc_err_result { 316 ACC_ERR_NONE, 317 ACC_ERR_NEED_RESET, 318 ACC_ERR_RECOVERED, 319 }; 320 321 enum qm_alg_type { 322 ALG_TYPE_0, 
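	/*
	 * A brief worked example of the V2 context packing above, assuming
	 * QM_Q_DEPTH is 1024 (as defined in qm.h) and a 128-byte SQE:
	 *
	 *   QM_MK_SQC_DW3_V2(128) = (1024 - 1) | (ilog2(128) << 12)
	 *                         = 0x3ff | (7 << 12) = 0x73ff
	 *
	 * i.e. the low bits of dw3 hold the queue depth minus one and the
	 * bits from QM_SQ_SQE_SIZE_SHIFT up hold the SQE size as a power of
	 * two; QM_MK_CQC_DW3_V2() is analogous for the CQC.
	 */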
323 ALG_TYPE_1, 324 }; 325 326 enum qm_mb_cmd { 327 QM_PF_FLR_PREPARE = 0x01, 328 QM_PF_SRST_PREPARE, 329 QM_PF_RESET_DONE, 330 QM_VF_PREPARE_DONE, 331 QM_VF_PREPARE_FAIL, 332 QM_VF_START_DONE, 333 QM_VF_START_FAIL, 334 QM_PF_SET_QOS, 335 QM_VF_GET_QOS, 336 }; 337 338 struct qm_cqe { 339 __le32 rsvd0; 340 __le16 cmd_id; 341 __le16 rsvd1; 342 __le16 sq_head; 343 __le16 sq_num; 344 __le16 rsvd2; 345 __le16 w7; 346 }; 347 348 struct qm_eqe { 349 __le32 dw0; 350 }; 351 352 struct qm_aeqe { 353 __le32 dw0; 354 }; 355 356 struct qm_sqc { 357 __le16 head; 358 __le16 tail; 359 __le32 base_l; 360 __le32 base_h; 361 __le32 dw3; 362 __le16 w8; 363 __le16 rsvd0; 364 __le16 pasid; 365 __le16 w11; 366 __le16 cq_num; 367 __le16 w13; 368 __le32 rsvd1; 369 }; 370 371 struct qm_cqc { 372 __le16 head; 373 __le16 tail; 374 __le32 base_l; 375 __le32 base_h; 376 __le32 dw3; 377 __le16 w8; 378 __le16 rsvd0; 379 __le16 pasid; 380 __le16 w11; 381 __le32 dw6; 382 __le32 rsvd1; 383 }; 384 385 struct qm_eqc { 386 __le16 head; 387 __le16 tail; 388 __le32 base_l; 389 __le32 base_h; 390 __le32 dw3; 391 __le32 rsvd[2]; 392 __le32 dw6; 393 }; 394 395 struct qm_aeqc { 396 __le16 head; 397 __le16 tail; 398 __le32 base_l; 399 __le32 base_h; 400 __le32 dw3; 401 __le32 rsvd[2]; 402 __le32 dw6; 403 }; 404 405 struct qm_mailbox { 406 __le16 w0; 407 __le16 queue_num; 408 __le32 base_l; 409 __le32 base_h; 410 __le32 rsvd; 411 }; 412 413 struct qm_doorbell { 414 __le16 queue_num; 415 __le16 cmd; 416 __le16 index; 417 __le16 priority; 418 }; 419 420 struct hisi_qm_resource { 421 struct hisi_qm *qm; 422 int distance; 423 struct list_head list; 424 }; 425 426 struct hisi_qm_hw_ops { 427 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); 428 void (*qm_db)(struct hisi_qm *qm, u16 qn, 429 u8 cmd, u16 index, u8 priority); 430 u32 (*get_irq_num)(struct hisi_qm *qm); 431 int (*debug_init)(struct hisi_qm *qm); 432 void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); 433 void (*hw_error_uninit)(struct hisi_qm *qm); 434 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); 435 int (*stop_qp)(struct hisi_qp *qp); 436 int (*set_msi)(struct hisi_qm *qm, bool set); 437 int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd); 438 int (*ping_pf)(struct hisi_qm *qm, u64 cmd); 439 }; 440 441 struct qm_dfx_item { 442 const char *name; 443 u32 offset; 444 }; 445 446 static struct qm_dfx_item qm_dfx_files[] = { 447 {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)}, 448 {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)}, 449 {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)}, 450 {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)}, 451 {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)}, 452 }; 453 454 static const char * const qm_debug_file_name[] = { 455 [CURRENT_QM] = "current_qm", 456 [CURRENT_Q] = "current_q", 457 [CLEAR_ENABLE] = "clear_enable", 458 }; 459 460 struct hisi_qm_hw_error { 461 u32 int_msk; 462 const char *msg; 463 }; 464 465 static const struct hisi_qm_hw_error qm_hw_error[] = { 466 { .int_msk = BIT(0), .msg = "qm_axi_rresp" }, 467 { .int_msk = BIT(1), .msg = "qm_axi_bresp" }, 468 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" }, 469 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" }, 470 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" }, 471 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" }, 472 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" }, 473 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" }, 474 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" }, 475 { .int_msk = BIT(9), .msg = 
"qm_sq_vf_invalid" }, 476 { .int_msk = BIT(10), .msg = "qm_db_timeout" }, 477 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, 478 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, 479 { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" }, 480 { .int_msk = BIT(14), .msg = "qm_flr_timeout" }, 481 { /* sentinel */ } 482 }; 483 484 static const char * const qm_db_timeout[] = { 485 "sq", "cq", "eq", "aeq", 486 }; 487 488 static const char * const qm_fifo_overflow[] = { 489 "cq", "eq", "aeq", 490 }; 491 492 static const char * const qm_s[] = { 493 "init", "start", "close", "stop", 494 }; 495 496 static const char * const qp_s[] = { 497 "none", "init", "start", "stop", "close", 498 }; 499 500 static const u32 typical_qos_val[QM_QOS_TYPICAL_NUM] = {100, 250, 500, 1000, 501 10000, 25000, 50000, 100000}; 502 static const u32 typical_qos_cbs_s[QM_QOS_TYPICAL_NUM] = {9, 10, 11, 12, 16, 503 17, 18, 19}; 504 505 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) 506 { 507 enum qm_state curr = atomic_read(&qm->status.flags); 508 bool avail = false; 509 510 switch (curr) { 511 case QM_INIT: 512 if (new == QM_START || new == QM_CLOSE) 513 avail = true; 514 break; 515 case QM_START: 516 if (new == QM_STOP) 517 avail = true; 518 break; 519 case QM_STOP: 520 if (new == QM_CLOSE || new == QM_START) 521 avail = true; 522 break; 523 default: 524 break; 525 } 526 527 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", 528 qm_s[curr], qm_s[new]); 529 530 if (!avail) 531 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", 532 qm_s[curr], qm_s[new]); 533 534 return avail; 535 } 536 537 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp, 538 enum qp_state new) 539 { 540 enum qm_state qm_curr = atomic_read(&qm->status.flags); 541 enum qp_state qp_curr = 0; 542 bool avail = false; 543 544 if (qp) 545 qp_curr = atomic_read(&qp->qp_status.flags); 546 547 switch (new) { 548 case QP_INIT: 549 if (qm_curr == QM_START || qm_curr == QM_INIT) 550 avail = true; 551 break; 552 case QP_START: 553 if ((qm_curr == QM_START && qp_curr == QP_INIT) || 554 (qm_curr == QM_START && qp_curr == QP_STOP)) 555 avail = true; 556 break; 557 case QP_STOP: 558 if ((qm_curr == QM_START && qp_curr == QP_START) || 559 (qp_curr == QP_INIT)) 560 avail = true; 561 break; 562 case QP_CLOSE: 563 if ((qm_curr == QM_START && qp_curr == QP_INIT) || 564 (qm_curr == QM_START && qp_curr == QP_STOP) || 565 (qm_curr == QM_STOP && qp_curr == QP_STOP) || 566 (qm_curr == QM_STOP && qp_curr == QP_INIT)) 567 avail = true; 568 break; 569 default: 570 break; 571 } 572 573 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", 574 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); 575 576 if (!avail) 577 dev_warn(&qm->pdev->dev, 578 "Can not change qp state from %s to %s in QM %s\n", 579 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); 580 581 return avail; 582 } 583 584 static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, 585 u64 base, u16 queue, bool op) 586 { 587 mailbox->w0 = cpu_to_le16((cmd) | 588 ((op) ? 
			0x1 << QM_MB_OP_SHIFT : 0) |
			(0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), POLL_PERIOD, POLL_TIMEOUT);
}

/* 128 bits should be written to the hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
	unsigned long tmp0 = 0, tmp1 = 0;

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		wmb();
		return;
	}

	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dsb sy\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
}

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	if (unlikely(qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start a new operation!\n");
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return -EBUSY;
}

static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
		 bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
		queue, cmd, (unsigned long long)dma_addr);

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static u32
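/*
 * Interrupt vector usage behind the helpers below: vector 0 is the EQ event
 * interrupt, 1 the AEQ, 2 the inter-function command interrupt and 3 the
 * abnormal (error) interrupt. QM_HW_V1 only uses the EQ vector, QM_HW_V2
 * adds the AEQ (plus the abnormal vector on the PF), and QM_HW_V3 also gives
 * VFs the command vector, which is why these helpers report 1, 4/2 and 4/3
 * vectors for PF/VF respectively.
 */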
qm_get_irq_num_v1(struct hisi_qm *qm) 717 { 718 return QM_IRQ_NUM_V1; 719 } 720 721 static u32 qm_get_irq_num_v2(struct hisi_qm *qm) 722 { 723 if (qm->fun_type == QM_HW_PF) 724 return QM_IRQ_NUM_PF_V2; 725 else 726 return QM_IRQ_NUM_VF_V2; 727 } 728 729 static u32 qm_get_irq_num_v3(struct hisi_qm *qm) 730 { 731 if (qm->fun_type == QM_HW_PF) 732 return QM_IRQ_NUM_PF_V2; 733 734 return QM_IRQ_NUM_VF_V3; 735 } 736 737 static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) 738 { 739 u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; 740 741 return &qm->qp_array[cqn]; 742 } 743 744 static void qm_cq_head_update(struct hisi_qp *qp) 745 { 746 if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { 747 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; 748 qp->qp_status.cq_head = 0; 749 } else { 750 qp->qp_status.cq_head++; 751 } 752 } 753 754 static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) 755 { 756 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) 757 return; 758 759 if (qp->event_cb) { 760 qp->event_cb(qp); 761 return; 762 } 763 764 if (qp->req_cb) { 765 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 766 767 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 768 dma_rmb(); 769 qp->req_cb(qp, qp->sqe + qm->sqe_size * 770 le16_to_cpu(cqe->sq_head)); 771 qm_cq_head_update(qp); 772 cqe = qp->cqe + qp->qp_status.cq_head; 773 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, 774 qp->qp_status.cq_head, 0); 775 atomic_dec(&qp->qp_status.used); 776 } 777 778 /* set c_flag */ 779 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, 780 qp->qp_status.cq_head, 1); 781 } 782 } 783 784 static void qm_work_process(struct work_struct *work) 785 { 786 struct hisi_qm *qm = container_of(work, struct hisi_qm, work); 787 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; 788 struct hisi_qp *qp; 789 int eqe_num = 0; 790 791 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { 792 eqe_num++; 793 qp = qm_to_hisi_qp(qm, eqe); 794 qm_poll_qp(qp, qm); 795 796 if (qm->status.eq_head == QM_EQ_DEPTH - 1) { 797 qm->status.eqc_phase = !qm->status.eqc_phase; 798 eqe = qm->eqe; 799 qm->status.eq_head = 0; 800 } else { 801 eqe++; 802 qm->status.eq_head++; 803 } 804 805 if (eqe_num == QM_EQ_DEPTH / 2 - 1) { 806 eqe_num = 0; 807 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 808 } 809 } 810 811 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 812 } 813 814 static irqreturn_t do_qm_irq(int irq, void *data) 815 { 816 struct hisi_qm *qm = (struct hisi_qm *)data; 817 818 /* the workqueue created by device driver of QM */ 819 if (qm->wq) 820 queue_work(qm->wq, &qm->work); 821 else 822 schedule_work(&qm->work); 823 824 return IRQ_HANDLED; 825 } 826 827 static irqreturn_t qm_irq(int irq, void *data) 828 { 829 struct hisi_qm *qm = data; 830 831 if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) 832 return do_qm_irq(irq, data); 833 834 atomic64_inc(&qm->debug.dfx.err_irq_cnt); 835 dev_err(&qm->pdev->dev, "invalid int source\n"); 836 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 837 838 return IRQ_NONE; 839 } 840 841 static irqreturn_t qm_mb_cmd_irq(int irq, void *data) 842 { 843 struct hisi_qm *qm = data; 844 u32 val; 845 846 val = readl(qm->io_base + QM_IFC_INT_STATUS); 847 val &= QM_IFC_INT_STATUS_MASK; 848 if (!val) 849 return IRQ_NONE; 850 851 schedule_work(&qm->cmd_process); 852 853 return IRQ_HANDLED; 854 } 855 856 static irqreturn_t qm_aeq_irq(int irq, void *data) 857 { 858 struct hisi_qm *qm = data; 859 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; 860 u32 type; 861 862 
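	/*
	 * The AEQ is consumed with the same phase-bit convention used for
	 * the EQ in qm_work_process(): an entry is new as long as its
	 * QM_AEQE_PHASE() matches qm->status.aeqc_phase, the expected phase
	 * is toggled whenever the head wraps around, and progress is acked
	 * by ringing the AEQ doorbell with the updated head.
	 */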
	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		if (type < ARRAY_SIZE(qm_fifo_overflow))
			dev_err(&qm->pdev->dev, "%s overflow\n",
				qm_fifo_overflow[type]);
		else
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);

		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}

		qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
	}

	return IRQ_HANDLED;
}

static void qm_irq_unregister(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);

	if (qm->ver > QM_HW_V1) {
		free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);

		if (qm->fun_type == QM_HW_PF)
			free_irq(pci_irq_vector(pdev,
				 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
	}

	if (qm->ver > QM_HW_V2)
		free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (qm->ver < QM_HW_V3)
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size is not supported: %lu, default set to 4KB",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}

/*
 * the formula:
 * IR = X Mbps: ir = 1 means IR = 100 Mbps, ir = 10000 means IR = 10 Gbps
 *
 *                          IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) * 10 ^ -3 = -----------------------------
 *                           Tick * (2 ^ IR_s)
 */
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
	       (QM_QOS_TICK * (1 << cir_s));
}

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int i;

	if (ir < typical_qos_val[0])
		return QM_SHAPER_MIN_CBS_S;

	for (i = 1; i < QM_QOS_TYPICAL_NUM; i++) {
		if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i])
			return typical_qos_cbs_s[i - 1];
	}

	return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1];
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			for (cir_s = 0; cir_s <= QM_QOS_MAX_CIR_S; cir_s++) {
				/*
				 * the formula is changed to:
				 *             IR_b * (2 ^ IR_u) * DIVISOR_CLK
				 * IR(Mbps) = ---------------------------------
				 *                     768 * (2 ^ IR_s)
				 */
				ir_calc = acc_shaper_para_calc(cir_b, cir_u,
							       cir_s);
				error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
				if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
					factor->cir_b = cir_b;
					factor->cir_u = cir_u;
					factor->cir_s =
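					/*
					 * Worked example: for the default rate
					 * ir = QM_QOS_MAX_VAL * QM_QOS_RATE = 100000
					 * this search settles on cir_b = 150,
					 * cir_u = 6, cir_s = 0, because
					 * 150 * 8000 * 2^6 / (768 * 2^0) = 100000
					 * exactly (QM_QOS_DIVISOR_CLK = 0x1f40 = 8000,
					 * QM_QOS_TICK = 0x300 = 768), and cbs_s = 19
					 * comes from typical_qos_cbs_s[].
					 */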
cir_s; 995 996 return 0; 997 } 998 } 999 } 1000 } 1001 1002 return -EINVAL; 1003 } 1004 1005 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, 1006 u32 number, struct qm_shaper_factor *factor) 1007 { 1008 u64 tmp = 0; 1009 1010 if (number > 0) { 1011 switch (type) { 1012 case SQC_VFT: 1013 if (qm->ver == QM_HW_V1) { 1014 tmp = QM_SQC_VFT_BUF_SIZE | 1015 QM_SQC_VFT_SQC_SIZE | 1016 QM_SQC_VFT_INDEX_NUMBER | 1017 QM_SQC_VFT_VALID | 1018 (u64)base << QM_SQC_VFT_START_SQN_SHIFT; 1019 } else { 1020 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | 1021 QM_SQC_VFT_VALID | 1022 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; 1023 } 1024 break; 1025 case CQC_VFT: 1026 if (qm->ver == QM_HW_V1) { 1027 tmp = QM_CQC_VFT_BUF_SIZE | 1028 QM_CQC_VFT_SQC_SIZE | 1029 QM_CQC_VFT_INDEX_NUMBER | 1030 QM_CQC_VFT_VALID; 1031 } else { 1032 tmp = QM_CQC_VFT_VALID; 1033 } 1034 break; 1035 case SHAPER_VFT: 1036 if (qm->ver >= QM_HW_V3) { 1037 tmp = factor->cir_b | 1038 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | 1039 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | 1040 (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) | 1041 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); 1042 } 1043 break; 1044 } 1045 } 1046 1047 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); 1048 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); 1049 } 1050 1051 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, 1052 u32 fun_num, u32 base, u32 number) 1053 { 1054 struct qm_shaper_factor *factor = &qm->factor[fun_num]; 1055 unsigned int val; 1056 int ret; 1057 1058 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 1059 val & BIT(0), POLL_PERIOD, 1060 POLL_TIMEOUT); 1061 if (ret) 1062 return ret; 1063 1064 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); 1065 writel(type, qm->io_base + QM_VFT_CFG_TYPE); 1066 if (type == SHAPER_VFT) 1067 fun_num |= base << QM_SHAPER_VFT_OFFSET; 1068 1069 writel(fun_num, qm->io_base + QM_VFT_CFG); 1070 1071 qm_vft_data_cfg(qm, type, base, number, factor); 1072 1073 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 1074 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 1075 1076 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 1077 val & BIT(0), POLL_PERIOD, 1078 POLL_TIMEOUT); 1079 } 1080 1081 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) 1082 { 1083 int ret, i; 1084 1085 qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL; 1086 ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]); 1087 if (ret) { 1088 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); 1089 return ret; 1090 } 1091 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); 1092 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 1093 /* The base number of queue reuse for different alg type */ 1094 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); 1095 if (ret) 1096 return ret; 1097 } 1098 1099 return 0; 1100 } 1101 1102 /* The config should be conducted after qm_dev_mem_reset() */ 1103 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 1104 u32 number) 1105 { 1106 int ret, i; 1107 1108 for (i = SQC_VFT; i <= CQC_VFT; i++) { 1109 ret = qm_set_vft_common(qm, i, fun_num, base, number); 1110 if (ret) 1111 return ret; 1112 } 1113 1114 /* init default shaper qos val */ 1115 if (qm->ver >= QM_HW_V3) { 1116 ret = qm_shaper_init_vft(qm, fun_num); 1117 if (ret) 1118 goto back_sqc_cqc; 1119 } 1120 1121 return 0; 1122 back_sqc_cqc: 1123 for (i = SQC_VFT; i <= CQC_VFT; i++) { 1124 ret = 
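		/*
		 * Roll back on shaper setup failure: re-running the SQC/CQC
		 * VFT configuration with base 0 and number 0 makes
		 * qm_vft_data_cfg() write an all-zero (invalid) entry for
		 * this function, undoing the allocation done just above.
		 */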
qm_set_vft_common(qm, i, fun_num, 0, 0);
		if (ret)
			return ret;
	}
	return ret;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_v2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}

static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
{
	u32 remain_q_num, vfq_num;
	u32 num_vfs = qm->vfs_num;

	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
	if (vfq_num >= qm->max_qp_num)
		return qm->max_qp_num;

	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
	if (vfq_num + remain_q_num <= qm->max_qp_num)
		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;

	/*
	 * if vfq_num + remain_q_num > max_qp_num, the last remain_q_num VFs
	 * each get one extra queue.
	 */
	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
}

static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
	struct qm_debug *debug = file->debug;

	return container_of(debug, struct hisi_qm, debug);
}

static u32 current_q_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}

static int current_q_write(struct debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val >= qm->debug.curr_qm_qp_num)
		return -EINVAL;

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 clear_enable_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}

/* rd_clr_ctrl 1 enables read-clear, 0 disables it */
static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
{
	struct hisi_qm *qm = file_to_qm(file);

	if (rd_clr_ctrl > 1)
		return -EINVAL;

	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);

	return 0;
}

static u32 current_qm_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int current_qm_write(struct debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

	/* Calculate curr_qm_qp_num according to the PF or VF device id and store it */
	if (!val)
		qm->debug.curr_qm_qp_num = qm->qp_num;
	else
		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp,
qm->io_base + QM_DFX_SQE_CNT_VF_SQN); 1249 1250 tmp = val | 1251 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); 1252 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); 1253 1254 return 0; 1255 } 1256 1257 static ssize_t qm_debug_read(struct file *filp, char __user *buf, 1258 size_t count, loff_t *pos) 1259 { 1260 struct debugfs_file *file = filp->private_data; 1261 enum qm_debug_file index = file->index; 1262 char tbuf[QM_DBG_TMP_BUF_LEN]; 1263 u32 val; 1264 int ret; 1265 1266 mutex_lock(&file->lock); 1267 switch (index) { 1268 case CURRENT_QM: 1269 val = current_qm_read(file); 1270 break; 1271 case CURRENT_Q: 1272 val = current_q_read(file); 1273 break; 1274 case CLEAR_ENABLE: 1275 val = clear_enable_read(file); 1276 break; 1277 default: 1278 mutex_unlock(&file->lock); 1279 return -EINVAL; 1280 } 1281 mutex_unlock(&file->lock); 1282 1283 ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val); 1284 return simple_read_from_buffer(buf, count, pos, tbuf, ret); 1285 } 1286 1287 static ssize_t qm_debug_write(struct file *filp, const char __user *buf, 1288 size_t count, loff_t *pos) 1289 { 1290 struct debugfs_file *file = filp->private_data; 1291 enum qm_debug_file index = file->index; 1292 unsigned long val; 1293 char tbuf[QM_DBG_TMP_BUF_LEN]; 1294 int len, ret; 1295 1296 if (*pos != 0) 1297 return 0; 1298 1299 if (count >= QM_DBG_TMP_BUF_LEN) 1300 return -ENOSPC; 1301 1302 len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf, 1303 count); 1304 if (len < 0) 1305 return len; 1306 1307 tbuf[len] = '\0'; 1308 if (kstrtoul(tbuf, 0, &val)) 1309 return -EFAULT; 1310 1311 mutex_lock(&file->lock); 1312 switch (index) { 1313 case CURRENT_QM: 1314 ret = current_qm_write(file, val); 1315 break; 1316 case CURRENT_Q: 1317 ret = current_q_write(file, val); 1318 break; 1319 case CLEAR_ENABLE: 1320 ret = clear_enable_write(file, val); 1321 break; 1322 default: 1323 ret = -EINVAL; 1324 } 1325 mutex_unlock(&file->lock); 1326 1327 if (ret) 1328 return ret; 1329 1330 return count; 1331 } 1332 1333 static const struct file_operations qm_debug_fops = { 1334 .owner = THIS_MODULE, 1335 .open = simple_open, 1336 .read = qm_debug_read, 1337 .write = qm_debug_write, 1338 }; 1339 1340 struct qm_dfx_registers { 1341 char *reg_name; 1342 u64 reg_offset; 1343 }; 1344 1345 #define CNT_CYC_REGS_NUM 10 1346 static struct qm_dfx_registers qm_dfx_regs[] = { 1347 /* XXX_CNT are reading clear register */ 1348 {"QM_ECC_1BIT_CNT ", 0x104000ull}, 1349 {"QM_ECC_MBIT_CNT ", 0x104008ull}, 1350 {"QM_DFX_MB_CNT ", 0x104018ull}, 1351 {"QM_DFX_DB_CNT ", 0x104028ull}, 1352 {"QM_DFX_SQE_CNT ", 0x104038ull}, 1353 {"QM_DFX_CQE_CNT ", 0x104048ull}, 1354 {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull}, 1355 {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull}, 1356 {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull}, 1357 {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull}, 1358 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, 1359 {"QM_ECC_1BIT_INF ", 0x104004ull}, 1360 {"QM_ECC_MBIT_INF ", 0x10400cull}, 1361 {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull}, 1362 {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull}, 1363 {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull}, 1364 {"QM_DFX_FF_ST0 ", 0x1040c8ull}, 1365 {"QM_DFX_FF_ST1 ", 0x1040ccull}, 1366 {"QM_DFX_FF_ST2 ", 0x1040d0ull}, 1367 {"QM_DFX_FF_ST3 ", 0x1040d4ull}, 1368 {"QM_DFX_FF_ST4 ", 0x1040d8ull}, 1369 {"QM_DFX_FF_ST5 ", 0x1040dcull}, 1370 {"QM_DFX_FF_ST6 ", 0x1040e0ull}, 1371 {"QM_IN_IDLE_ST ", 0x1040e4ull}, 1372 { NULL, 0} 1373 }; 1374 1375 static struct qm_dfx_registers qm_vf_dfx_regs[] = { 1376 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, 1377 { 
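	/*
	 * Like qm_dfx_regs above, the table is terminated by an entry whose
	 * reg_name is NULL; qm_regs_show() below walks entries until it
	 * reaches that sentinel.
	 */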
NULL, 0} 1378 }; 1379 1380 static int qm_regs_show(struct seq_file *s, void *unused) 1381 { 1382 struct hisi_qm *qm = s->private; 1383 struct qm_dfx_registers *regs; 1384 u32 val; 1385 1386 if (qm->fun_type == QM_HW_PF) 1387 regs = qm_dfx_regs; 1388 else 1389 regs = qm_vf_dfx_regs; 1390 1391 while (regs->reg_name) { 1392 val = readl(qm->io_base + regs->reg_offset); 1393 seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val); 1394 regs++; 1395 } 1396 1397 return 0; 1398 } 1399 1400 DEFINE_SHOW_ATTRIBUTE(qm_regs); 1401 1402 static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, 1403 size_t count, loff_t *pos) 1404 { 1405 char buf[QM_DBG_READ_LEN]; 1406 int len; 1407 1408 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", 1409 "Please echo help to cmd to get help information"); 1410 1411 return simple_read_from_buffer(buffer, count, pos, buf, len); 1412 } 1413 1414 static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, 1415 dma_addr_t *dma_addr) 1416 { 1417 struct device *dev = &qm->pdev->dev; 1418 void *ctx_addr; 1419 1420 ctx_addr = kzalloc(ctx_size, GFP_KERNEL); 1421 if (!ctx_addr) 1422 return ERR_PTR(-ENOMEM); 1423 1424 *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); 1425 if (dma_mapping_error(dev, *dma_addr)) { 1426 dev_err(dev, "DMA mapping error!\n"); 1427 kfree(ctx_addr); 1428 return ERR_PTR(-ENOMEM); 1429 } 1430 1431 return ctx_addr; 1432 } 1433 1434 static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, 1435 const void *ctx_addr, dma_addr_t *dma_addr) 1436 { 1437 struct device *dev = &qm->pdev->dev; 1438 1439 dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); 1440 kfree(ctx_addr); 1441 } 1442 1443 static int dump_show(struct hisi_qm *qm, void *info, 1444 unsigned int info_size, char *info_name) 1445 { 1446 struct device *dev = &qm->pdev->dev; 1447 u8 *info_buf, *info_curr = info; 1448 u32 i; 1449 #define BYTE_PER_DW 4 1450 1451 info_buf = kzalloc(info_size, GFP_KERNEL); 1452 if (!info_buf) 1453 return -ENOMEM; 1454 1455 for (i = 0; i < info_size; i++, info_curr++) { 1456 if (i % BYTE_PER_DW == 0) 1457 info_buf[i + 3UL] = *info_curr; 1458 else if (i % BYTE_PER_DW == 1) 1459 info_buf[i + 1UL] = *info_curr; 1460 else if (i % BYTE_PER_DW == 2) 1461 info_buf[i - 1] = *info_curr; 1462 else if (i % BYTE_PER_DW == 3) 1463 info_buf[i - 3] = *info_curr; 1464 } 1465 1466 dev_info(dev, "%s DUMP\n", info_name); 1467 for (i = 0; i < info_size; i += BYTE_PER_DW) { 1468 pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW, 1469 info_buf[i], info_buf[i + 1UL], 1470 info_buf[i + 2UL], info_buf[i + 3UL]); 1471 } 1472 1473 kfree(info_buf); 1474 1475 return 0; 1476 } 1477 1478 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) 1479 { 1480 return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); 1481 } 1482 1483 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) 1484 { 1485 return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); 1486 } 1487 1488 static int qm_sqc_dump(struct hisi_qm *qm, const char *s) 1489 { 1490 struct device *dev = &qm->pdev->dev; 1491 struct qm_sqc *sqc, *sqc_curr; 1492 dma_addr_t sqc_dma; 1493 u32 qp_id; 1494 int ret; 1495 1496 if (!s) 1497 return -EINVAL; 1498 1499 ret = kstrtou32(s, 0, &qp_id); 1500 if (ret || qp_id >= qm->qp_num) { 1501 dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); 1502 return -EINVAL; 1503 } 1504 1505 sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); 1506 if (IS_ERR(sqc)) 1507 return PTR_ERR(sqc); 1508 1509 ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id); 1510 
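	/*
	 * If the mailbox dump of the hardware SQC fails, fall back to the
	 * driver's cached copy (qm->sqc) under qps_lock so the dump still
	 * shows something useful; qm_cqc_dump() below follows the same
	 * pattern for the CQC.
	 */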
if (ret) { 1511 down_read(&qm->qps_lock); 1512 if (qm->sqc) { 1513 sqc_curr = qm->sqc + qp_id; 1514 1515 ret = dump_show(qm, sqc_curr, sizeof(*sqc), 1516 "SOFT SQC"); 1517 if (ret) 1518 dev_info(dev, "Show soft sqc failed!\n"); 1519 } 1520 up_read(&qm->qps_lock); 1521 1522 goto err_free_ctx; 1523 } 1524 1525 ret = dump_show(qm, sqc, sizeof(*sqc), "SQC"); 1526 if (ret) 1527 dev_info(dev, "Show hw sqc failed!\n"); 1528 1529 err_free_ctx: 1530 qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); 1531 return ret; 1532 } 1533 1534 static int qm_cqc_dump(struct hisi_qm *qm, const char *s) 1535 { 1536 struct device *dev = &qm->pdev->dev; 1537 struct qm_cqc *cqc, *cqc_curr; 1538 dma_addr_t cqc_dma; 1539 u32 qp_id; 1540 int ret; 1541 1542 if (!s) 1543 return -EINVAL; 1544 1545 ret = kstrtou32(s, 0, &qp_id); 1546 if (ret || qp_id >= qm->qp_num) { 1547 dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); 1548 return -EINVAL; 1549 } 1550 1551 cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); 1552 if (IS_ERR(cqc)) 1553 return PTR_ERR(cqc); 1554 1555 ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id); 1556 if (ret) { 1557 down_read(&qm->qps_lock); 1558 if (qm->cqc) { 1559 cqc_curr = qm->cqc + qp_id; 1560 1561 ret = dump_show(qm, cqc_curr, sizeof(*cqc), 1562 "SOFT CQC"); 1563 if (ret) 1564 dev_info(dev, "Show soft cqc failed!\n"); 1565 } 1566 up_read(&qm->qps_lock); 1567 1568 goto err_free_ctx; 1569 } 1570 1571 ret = dump_show(qm, cqc, sizeof(*cqc), "CQC"); 1572 if (ret) 1573 dev_info(dev, "Show hw cqc failed!\n"); 1574 1575 err_free_ctx: 1576 qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); 1577 return ret; 1578 } 1579 1580 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, 1581 int cmd, char *name) 1582 { 1583 struct device *dev = &qm->pdev->dev; 1584 dma_addr_t xeqc_dma; 1585 void *xeqc; 1586 int ret; 1587 1588 if (strsep(&s, " ")) { 1589 dev_err(dev, "Please do not input extra characters!\n"); 1590 return -EINVAL; 1591 } 1592 1593 xeqc = qm_ctx_alloc(qm, size, &xeqc_dma); 1594 if (IS_ERR(xeqc)) 1595 return PTR_ERR(xeqc); 1596 1597 ret = qm_mb(qm, cmd, xeqc_dma, 0, 1); 1598 if (ret) 1599 goto err_free_ctx; 1600 1601 ret = dump_show(qm, xeqc, size, name); 1602 if (ret) 1603 dev_info(dev, "Show hw %s failed!\n", name); 1604 1605 err_free_ctx: 1606 qm_ctx_free(qm, size, xeqc, &xeqc_dma); 1607 return ret; 1608 } 1609 1610 static int q_dump_param_parse(struct hisi_qm *qm, char *s, 1611 u32 *e_id, u32 *q_id) 1612 { 1613 struct device *dev = &qm->pdev->dev; 1614 unsigned int qp_num = qm->qp_num; 1615 char *presult; 1616 int ret; 1617 1618 presult = strsep(&s, " "); 1619 if (!presult) { 1620 dev_err(dev, "Please input qp number!\n"); 1621 return -EINVAL; 1622 } 1623 1624 ret = kstrtou32(presult, 0, q_id); 1625 if (ret || *q_id >= qp_num) { 1626 dev_err(dev, "Please input qp num (0-%u)", qp_num - 1); 1627 return -EINVAL; 1628 } 1629 1630 presult = strsep(&s, " "); 1631 if (!presult) { 1632 dev_err(dev, "Please input sqe number!\n"); 1633 return -EINVAL; 1634 } 1635 1636 ret = kstrtou32(presult, 0, e_id); 1637 if (ret || *e_id >= QM_Q_DEPTH) { 1638 dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1); 1639 return -EINVAL; 1640 } 1641 1642 if (strsep(&s, " ")) { 1643 dev_err(dev, "Please do not input extra characters!\n"); 1644 return -EINVAL; 1645 } 1646 1647 return 0; 1648 } 1649 1650 static int qm_sq_dump(struct hisi_qm *qm, char *s) 1651 { 1652 struct device *dev = &qm->pdev->dev; 1653 void *sqe, *sqe_curr; 1654 struct hisi_qp *qp; 1655 u32 qp_id, sqe_id; 1656 int ret; 1657 1658 ret = 
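	/*
	 * q_dump_param_parse() expects two space-separated values: the qp
	 * number (0 to qm->qp_num - 1) followed by the sqe/cqe index
	 * (bounded by QM_Q_DEPTH), e.g. "sq 0 5" dumps SQE 5 of queue 0.
	 */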
q_dump_param_parse(qm, s, &sqe_id, &qp_id); 1659 if (ret) 1660 return ret; 1661 1662 sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL); 1663 if (!sqe) 1664 return -ENOMEM; 1665 1666 qp = &qm->qp_array[qp_id]; 1667 memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH); 1668 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); 1669 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, 1670 qm->debug.sqe_mask_len); 1671 1672 ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); 1673 if (ret) 1674 dev_info(dev, "Show sqe failed!\n"); 1675 1676 kfree(sqe); 1677 1678 return ret; 1679 } 1680 1681 static int qm_cq_dump(struct hisi_qm *qm, char *s) 1682 { 1683 struct device *dev = &qm->pdev->dev; 1684 struct qm_cqe *cqe_curr; 1685 struct hisi_qp *qp; 1686 u32 qp_id, cqe_id; 1687 int ret; 1688 1689 ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id); 1690 if (ret) 1691 return ret; 1692 1693 qp = &qm->qp_array[qp_id]; 1694 cqe_curr = qp->cqe + cqe_id; 1695 ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); 1696 if (ret) 1697 dev_info(dev, "Show cqe failed!\n"); 1698 1699 return ret; 1700 } 1701 1702 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, 1703 size_t size, char *name) 1704 { 1705 struct device *dev = &qm->pdev->dev; 1706 void *xeqe; 1707 u32 xeqe_id; 1708 int ret; 1709 1710 if (!s) 1711 return -EINVAL; 1712 1713 ret = kstrtou32(s, 0, &xeqe_id); 1714 if (ret) 1715 return -EINVAL; 1716 1717 if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) { 1718 dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1); 1719 return -EINVAL; 1720 } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) { 1721 dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1); 1722 return -EINVAL; 1723 } 1724 1725 down_read(&qm->qps_lock); 1726 1727 if (qm->eqe && !strcmp(name, "EQE")) { 1728 xeqe = qm->eqe + xeqe_id; 1729 } else if (qm->aeqe && !strcmp(name, "AEQE")) { 1730 xeqe = qm->aeqe + xeqe_id; 1731 } else { 1732 ret = -EINVAL; 1733 goto err_unlock; 1734 } 1735 1736 ret = dump_show(qm, xeqe, size, name); 1737 if (ret) 1738 dev_info(dev, "Show %s failed!\n", name); 1739 1740 err_unlock: 1741 up_read(&qm->qps_lock); 1742 return ret; 1743 } 1744 1745 static int qm_dbg_help(struct hisi_qm *qm, char *s) 1746 { 1747 struct device *dev = &qm->pdev->dev; 1748 1749 if (strsep(&s, " ")) { 1750 dev_err(dev, "Please do not input extra characters!\n"); 1751 return -EINVAL; 1752 } 1753 1754 dev_info(dev, "available commands:\n"); 1755 dev_info(dev, "sqc <num>\n"); 1756 dev_info(dev, "cqc <num>\n"); 1757 dev_info(dev, "eqc\n"); 1758 dev_info(dev, "aeqc\n"); 1759 dev_info(dev, "sq <num> <e>\n"); 1760 dev_info(dev, "cq <num> <e>\n"); 1761 dev_info(dev, "eq <e>\n"); 1762 dev_info(dev, "aeq <e>\n"); 1763 1764 return 0; 1765 } 1766 1767 static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf) 1768 { 1769 struct device *dev = &qm->pdev->dev; 1770 char *presult, *s, *s_tmp; 1771 int ret; 1772 1773 s = kstrdup(cmd_buf, GFP_KERNEL); 1774 if (!s) 1775 return -ENOMEM; 1776 1777 s_tmp = s; 1778 presult = strsep(&s, " "); 1779 if (!presult) { 1780 ret = -EINVAL; 1781 goto err_buffer_free; 1782 } 1783 1784 if (!strcmp(presult, "sqc")) 1785 ret = qm_sqc_dump(qm, s); 1786 else if (!strcmp(presult, "cqc")) 1787 ret = qm_cqc_dump(qm, s); 1788 else if (!strcmp(presult, "eqc")) 1789 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc), 1790 QM_MB_CMD_EQC, "EQC"); 1791 else if (!strcmp(presult, "aeqc")) 1792 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc), 1793 QM_MB_CMD_AEQC, "AEQC"); 1794 else if 
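	/*
	 * Typical usage from user space, assuming the parent driver exposes
	 * this handler as a "cmd" debugfs entry:
	 *   echo "help" > cmd     - print the command list handled below
	 *   echo "sqc 0" > cmd    - dump the SQC of queue 0 via the mailbox
	 *   echo "eq 0" > cmd     - dump EQ entry 0 from the driver's copy
	 */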
(!strcmp(presult, "sq")) 1795 ret = qm_sq_dump(qm, s); 1796 else if (!strcmp(presult, "cq")) 1797 ret = qm_cq_dump(qm, s); 1798 else if (!strcmp(presult, "eq")) 1799 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE"); 1800 else if (!strcmp(presult, "aeq")) 1801 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE"); 1802 else if (!strcmp(presult, "help")) 1803 ret = qm_dbg_help(qm, s); 1804 else 1805 ret = -EINVAL; 1806 1807 if (ret) 1808 dev_info(dev, "Please echo help\n"); 1809 1810 err_buffer_free: 1811 kfree(s_tmp); 1812 1813 return ret; 1814 } 1815 1816 static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, 1817 size_t count, loff_t *pos) 1818 { 1819 struct hisi_qm *qm = filp->private_data; 1820 char *cmd_buf, *cmd_buf_tmp; 1821 int ret; 1822 1823 if (*pos) 1824 return 0; 1825 1826 /* Judge if the instance is being reset. */ 1827 if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) 1828 return 0; 1829 1830 if (count > QM_DBG_WRITE_LEN) 1831 return -ENOSPC; 1832 1833 cmd_buf = memdup_user_nul(buffer, count); 1834 if (IS_ERR(cmd_buf)) 1835 return PTR_ERR(cmd_buf); 1836 1837 cmd_buf_tmp = strchr(cmd_buf, '\n'); 1838 if (cmd_buf_tmp) { 1839 *cmd_buf_tmp = '\0'; 1840 count = cmd_buf_tmp - cmd_buf + 1; 1841 } 1842 1843 ret = qm_cmd_write_dump(qm, cmd_buf); 1844 if (ret) { 1845 kfree(cmd_buf); 1846 return ret; 1847 } 1848 1849 kfree(cmd_buf); 1850 1851 return count; 1852 } 1853 1854 static const struct file_operations qm_cmd_fops = { 1855 .owner = THIS_MODULE, 1856 .open = simple_open, 1857 .read = qm_cmd_read, 1858 .write = qm_cmd_write, 1859 }; 1860 1861 static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, 1862 enum qm_debug_file index) 1863 { 1864 struct debugfs_file *file = qm->debug.files + index; 1865 1866 debugfs_create_file(qm_debug_file_name[index], 0600, dir, file, 1867 &qm_debug_fops); 1868 1869 file->index = index; 1870 mutex_init(&file->lock); 1871 file->debug = &qm->debug; 1872 } 1873 1874 static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) 1875 { 1876 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 1877 } 1878 1879 static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) 1880 { 1881 qm->error_mask = ce | nfe | fe; 1882 /* clear QM hw residual error source */ 1883 writel(QM_ABNORMAL_INT_SOURCE_CLR, 1884 qm->io_base + QM_ABNORMAL_INT_SOURCE); 1885 1886 /* configure error type */ 1887 writel(ce, qm->io_base + QM_RAS_CE_ENABLE); 1888 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); 1889 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); 1890 writel(fe, qm->io_base + QM_RAS_FE_ENABLE); 1891 } 1892 1893 static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) 1894 { 1895 u32 irq_enable = ce | nfe | fe; 1896 u32 irq_unmask = ~irq_enable; 1897 1898 qm_hw_error_cfg(qm, ce, nfe, fe); 1899 1900 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1901 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); 1902 } 1903 1904 static void qm_hw_error_uninit_v2(struct hisi_qm *qm) 1905 { 1906 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 1907 } 1908 1909 static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) 1910 { 1911 u32 irq_enable = ce | nfe | fe; 1912 u32 irq_unmask = ~irq_enable; 1913 1914 qm_hw_error_cfg(qm, ce, nfe, fe); 1915 1916 /* enable close master ooo when hardware error happened */ 1917 writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL); 1918 1919 
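	/*
	 * QM_DB_RANDOM_INVALID is left out of the shutdown selection because
	 * qm_hw_error_handle_v2() treats it, like the CE errors, as
	 * recoverable without a reset, so it does not need to stop the
	 * master out-of-order path.
	 */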
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1920 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); 1921 } 1922 1923 static void qm_hw_error_uninit_v3(struct hisi_qm *qm) 1924 { 1925 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 1926 1927 /* disable close master ooo when hardware error happened */ 1928 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); 1929 } 1930 1931 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) 1932 { 1933 const struct hisi_qm_hw_error *err; 1934 struct device *dev = &qm->pdev->dev; 1935 u32 reg_val, type, vf_num; 1936 int i; 1937 1938 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) { 1939 err = &qm_hw_error[i]; 1940 if (!(err->int_msk & error_status)) 1941 continue; 1942 1943 dev_err(dev, "%s [error status=0x%x] found\n", 1944 err->msg, err->int_msk); 1945 1946 if (err->int_msk & QM_DB_TIMEOUT) { 1947 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); 1948 type = (reg_val & QM_DB_TIMEOUT_TYPE) >> 1949 QM_DB_TIMEOUT_TYPE_SHIFT; 1950 vf_num = reg_val & QM_DB_TIMEOUT_VF; 1951 dev_err(dev, "qm %s doorbell timeout in function %u\n", 1952 qm_db_timeout[type], vf_num); 1953 } else if (err->int_msk & QM_OF_FIFO_OF) { 1954 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); 1955 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >> 1956 QM_FIFO_OVERFLOW_TYPE_SHIFT; 1957 vf_num = reg_val & QM_FIFO_OVERFLOW_VF; 1958 1959 if (type < ARRAY_SIZE(qm_fifo_overflow)) 1960 dev_err(dev, "qm %s fifo overflow in function %u\n", 1961 qm_fifo_overflow[type], vf_num); 1962 else 1963 dev_err(dev, "unknown error type\n"); 1964 } 1965 } 1966 } 1967 1968 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) 1969 { 1970 u32 error_status, tmp, val; 1971 1972 /* read err sts */ 1973 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); 1974 error_status = qm->error_mask & tmp; 1975 1976 if (error_status) { 1977 if (error_status & QM_ECC_MBIT) 1978 qm->err_status.is_qm_ecc_mbit = true; 1979 1980 qm_log_hw_error(qm, error_status); 1981 val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE; 1982 /* ce error does not need to be reset */ 1983 if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) { 1984 writel(error_status, qm->io_base + 1985 QM_ABNORMAL_INT_SOURCE); 1986 writel(qm->err_info.nfe, 1987 qm->io_base + QM_RAS_NFE_ENABLE); 1988 return ACC_ERR_RECOVERED; 1989 } 1990 1991 return ACC_ERR_NEED_RESET; 1992 } 1993 1994 return ACC_ERR_RECOVERED; 1995 } 1996 1997 static u32 qm_get_hw_error_status(struct hisi_qm *qm) 1998 { 1999 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); 2000 } 2001 2002 static u32 qm_get_dev_err_status(struct hisi_qm *qm) 2003 { 2004 return qm->err_ini->get_dev_hw_err_status(qm); 2005 } 2006 2007 /* Check if the error causes the master ooo block */ 2008 static int qm_check_dev_error(struct hisi_qm *qm) 2009 { 2010 u32 val, dev_val; 2011 2012 if (qm->fun_type == QM_HW_VF) 2013 return 0; 2014 2015 val = qm_get_hw_error_status(qm); 2016 dev_val = qm_get_dev_err_status(qm); 2017 2018 if (qm->ver < QM_HW_V3) 2019 return (val & QM_ECC_MBIT) || 2020 (dev_val & qm->err_info.ecc_2bits_mask); 2021 2022 return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) || 2023 (dev_val & (~qm->err_info.dev_ce_mask)); 2024 } 2025 2026 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) 2027 { 2028 struct qm_mailbox mailbox; 2029 int ret; 2030 2031 qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0); 2032 mutex_lock(&qm->mailbox_lock); 2033 ret = qm_mb_nolock(qm, &mailbox); 2034 if (ret) 2035 goto err_unlock; 2036 2037 *msg = 
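	/*
	 * The 64-bit message is reassembled from the two mailbox data
	 * registers: the low 32 bits (QM_MB_CMD_DATA_MASK) carry the
	 * enum qm_mb_cmd value and the high 32 bits (shifted by
	 * QM_MB_CMD_DATA_SHIFT) can carry command-specific payload.
	 */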
readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | 2038 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); 2039 2040 err_unlock: 2041 mutex_unlock(&qm->mailbox_lock); 2042 return ret; 2043 } 2044 2045 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) 2046 { 2047 u32 val; 2048 2049 if (qm->fun_type == QM_HW_PF) 2050 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); 2051 2052 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); 2053 val |= QM_IFC_INT_SOURCE_MASK; 2054 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); 2055 } 2056 2057 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) 2058 { 2059 struct device *dev = &qm->pdev->dev; 2060 u32 cmd; 2061 u64 msg; 2062 int ret; 2063 2064 ret = qm_get_mb_cmd(qm, &msg, vf_id); 2065 if (ret) { 2066 dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id); 2067 return; 2068 } 2069 2070 cmd = msg & QM_MB_CMD_DATA_MASK; 2071 switch (cmd) { 2072 case QM_VF_PREPARE_FAIL: 2073 dev_err(dev, "failed to stop VF(%u)!\n", vf_id); 2074 break; 2075 case QM_VF_START_FAIL: 2076 dev_err(dev, "failed to start VF(%u)!\n", vf_id); 2077 break; 2078 case QM_VF_PREPARE_DONE: 2079 case QM_VF_START_DONE: 2080 break; 2081 default: 2082 dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id); 2083 break; 2084 } 2085 } 2086 2087 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm) 2088 { 2089 struct device *dev = &qm->pdev->dev; 2090 u32 vfs_num = qm->vfs_num; 2091 int cnt = 0; 2092 int ret = 0; 2093 u64 val; 2094 u32 i; 2095 2096 if (!qm->vfs_num || qm->ver < QM_HW_V3) 2097 return 0; 2098 2099 while (true) { 2100 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); 2101 /* All VFs send command to PF, break */ 2102 if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1)) 2103 break; 2104 2105 if (++cnt > QM_MAX_PF_WAIT_COUNT) { 2106 ret = -EBUSY; 2107 break; 2108 } 2109 2110 msleep(QM_WAIT_DST_ACK); 2111 } 2112 2113 /* PF check VFs msg */ 2114 for (i = 1; i <= vfs_num; i++) { 2115 if (val & BIT(i)) 2116 qm_handle_vf_msg(qm, i); 2117 else 2118 dev_err(dev, "VF(%u) not ping PF!\n", i); 2119 } 2120 2121 /* PF clear interrupt to ack VFs */ 2122 qm_clear_cmd_interrupt(qm, val); 2123 2124 return ret; 2125 } 2126 2127 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num) 2128 { 2129 u32 val; 2130 2131 val = readl(qm->io_base + QM_IFC_INT_CFG); 2132 val &= ~QM_IFC_SEND_ALL_VFS; 2133 val |= fun_num; 2134 writel(val, qm->io_base + QM_IFC_INT_CFG); 2135 2136 val = readl(qm->io_base + QM_IFC_INT_SET_P); 2137 val |= QM_IFC_INT_SET_MASK; 2138 writel(val, qm->io_base + QM_IFC_INT_SET_P); 2139 } 2140 2141 static void qm_trigger_pf_interrupt(struct hisi_qm *qm) 2142 { 2143 u32 val; 2144 2145 val = readl(qm->io_base + QM_IFC_INT_SET_V); 2146 val |= QM_IFC_INT_SET_MASK; 2147 writel(val, qm->io_base + QM_IFC_INT_SET_V); 2148 } 2149 2150 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num) 2151 { 2152 struct device *dev = &qm->pdev->dev; 2153 struct qm_mailbox mailbox; 2154 int cnt = 0; 2155 u64 val; 2156 int ret; 2157 2158 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0); 2159 mutex_lock(&qm->mailbox_lock); 2160 ret = qm_mb_nolock(qm, &mailbox); 2161 if (ret) { 2162 dev_err(dev, "failed to send command to vf(%u)!\n", fun_num); 2163 goto err_unlock; 2164 } 2165 2166 qm_trigger_vf_interrupt(qm, fun_num); 2167 while (true) { 2168 msleep(QM_WAIT_DST_ACK); 2169 val = readq(qm->io_base + QM_IFC_READY_STATUS); 2170 /* if VF respond, PF notifies VF successfully. 
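		 * A cleared BIT(fun_num) in QM_IFC_READY_STATUS is treated as
		 * the VF having consumed the command; if the bit is still set
		 * after QM_MAX_PF_WAIT_COUNT polls, the ping times out.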
*/ 2171 if (!(val & BIT(fun_num))) 2172 goto err_unlock; 2173 2174 if (++cnt > QM_MAX_PF_WAIT_COUNT) { 2175 dev_err(dev, "failed to get response from VF(%u)!\n", fun_num); 2176 ret = -ETIMEDOUT; 2177 break; 2178 } 2179 } 2180 2181 err_unlock: 2182 mutex_unlock(&qm->mailbox_lock); 2183 return ret; 2184 } 2185 2186 static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) 2187 { 2188 struct device *dev = &qm->pdev->dev; 2189 u32 vfs_num = qm->vfs_num; 2190 struct qm_mailbox mailbox; 2191 u64 val = 0; 2192 int cnt = 0; 2193 int ret; 2194 u32 i; 2195 2196 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0); 2197 mutex_lock(&qm->mailbox_lock); 2198 /* PF sends command to all VFs by mailbox */ 2199 ret = qm_mb_nolock(qm, &mailbox); 2200 if (ret) { 2201 dev_err(dev, "failed to send command to VFs!\n"); 2202 mutex_unlock(&qm->mailbox_lock); 2203 return ret; 2204 } 2205 2206 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS); 2207 while (true) { 2208 msleep(QM_WAIT_DST_ACK); 2209 val = readq(qm->io_base + QM_IFC_READY_STATUS); 2210 /* If all VFs acked, PF notifies VFs successfully. */ 2211 if (!(val & GENMASK(vfs_num, 1))) { 2212 mutex_unlock(&qm->mailbox_lock); 2213 return 0; 2214 } 2215 2216 if (++cnt > QM_MAX_PF_WAIT_COUNT) 2217 break; 2218 } 2219 2220 mutex_unlock(&qm->mailbox_lock); 2221 2222 /* Check which vf respond timeout. */ 2223 for (i = 1; i <= vfs_num; i++) { 2224 if (val & BIT(i)) 2225 dev_err(dev, "failed to get response from VF(%u)!\n", i); 2226 } 2227 2228 return -ETIMEDOUT; 2229 } 2230 2231 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) 2232 { 2233 struct qm_mailbox mailbox; 2234 int cnt = 0; 2235 u32 val; 2236 int ret; 2237 2238 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0); 2239 mutex_lock(&qm->mailbox_lock); 2240 ret = qm_mb_nolock(qm, &mailbox); 2241 if (ret) { 2242 dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); 2243 goto unlock; 2244 } 2245 2246 qm_trigger_pf_interrupt(qm); 2247 /* Waiting for PF response */ 2248 while (true) { 2249 msleep(QM_WAIT_DST_ACK); 2250 val = readl(qm->io_base + QM_IFC_INT_SET_V); 2251 if (!(val & QM_IFC_INT_STATUS_MASK)) 2252 break; 2253 2254 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 2255 ret = -ETIMEDOUT; 2256 break; 2257 } 2258 } 2259 2260 unlock: 2261 mutex_unlock(&qm->mailbox_lock); 2262 return ret; 2263 } 2264 2265 static int qm_stop_qp(struct hisi_qp *qp) 2266 { 2267 return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); 2268 } 2269 2270 static int qm_set_msi(struct hisi_qm *qm, bool set) 2271 { 2272 struct pci_dev *pdev = qm->pdev; 2273 2274 if (set) { 2275 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, 2276 0); 2277 } else { 2278 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, 2279 ACC_PEH_MSI_DISABLE); 2280 if (qm->err_status.is_qm_ecc_mbit || 2281 qm->err_status.is_dev_ecc_mbit) 2282 return 0; 2283 2284 mdelay(1); 2285 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) 2286 return -EFAULT; 2287 } 2288 2289 return 0; 2290 } 2291 2292 static void qm_wait_msi_finish(struct hisi_qm *qm) 2293 { 2294 struct pci_dev *pdev = qm->pdev; 2295 u32 cmd = ~0; 2296 int cnt = 0; 2297 u32 val; 2298 int ret; 2299 2300 while (true) { 2301 pci_read_config_dword(pdev, pdev->msi_cap + 2302 PCI_MSI_PENDING_64, &cmd); 2303 if (!cmd) 2304 break; 2305 2306 if (++cnt > MAX_WAIT_COUNTS) { 2307 pci_warn(pdev, "failed to empty MSI PENDING!\n"); 2308 break; 2309 } 2310 2311 udelay(1); 2312 } 2313 2314 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, 2315 val, !(val & QM_PEH_DFX_MASK), 2316 POLL_PERIOD, 
POLL_TIMEOUT); 2317 if (ret) 2318 pci_warn(pdev, "failed to empty PEH MSI!\n"); 2319 2320 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, 2321 val, !(val & QM_PEH_MSI_FINISH_MASK), 2322 POLL_PERIOD, POLL_TIMEOUT); 2323 if (ret) 2324 pci_warn(pdev, "failed to finish MSI operation!\n"); 2325 } 2326 2327 static int qm_set_msi_v3(struct hisi_qm *qm, bool set) 2328 { 2329 struct pci_dev *pdev = qm->pdev; 2330 int ret = -ETIMEDOUT; 2331 u32 cmd, i; 2332 2333 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); 2334 if (set) 2335 cmd |= QM_MSI_CAP_ENABLE; 2336 else 2337 cmd &= ~QM_MSI_CAP_ENABLE; 2338 2339 pci_write_config_dword(pdev, pdev->msi_cap, cmd); 2340 if (set) { 2341 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 2342 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); 2343 if (cmd & QM_MSI_CAP_ENABLE) 2344 return 0; 2345 2346 udelay(1); 2347 } 2348 } else { 2349 udelay(WAIT_PERIOD_US_MIN); 2350 qm_wait_msi_finish(qm); 2351 ret = 0; 2352 } 2353 2354 return ret; 2355 } 2356 2357 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { 2358 .qm_db = qm_db_v1, 2359 .get_irq_num = qm_get_irq_num_v1, 2360 .hw_error_init = qm_hw_error_init_v1, 2361 .set_msi = qm_set_msi, 2362 }; 2363 2364 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { 2365 .get_vft = qm_get_vft_v2, 2366 .qm_db = qm_db_v2, 2367 .get_irq_num = qm_get_irq_num_v2, 2368 .hw_error_init = qm_hw_error_init_v2, 2369 .hw_error_uninit = qm_hw_error_uninit_v2, 2370 .hw_error_handle = qm_hw_error_handle_v2, 2371 .set_msi = qm_set_msi, 2372 }; 2373 2374 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { 2375 .get_vft = qm_get_vft_v2, 2376 .qm_db = qm_db_v2, 2377 .get_irq_num = qm_get_irq_num_v3, 2378 .hw_error_init = qm_hw_error_init_v3, 2379 .hw_error_uninit = qm_hw_error_uninit_v3, 2380 .hw_error_handle = qm_hw_error_handle_v2, 2381 .stop_qp = qm_stop_qp, 2382 .set_msi = qm_set_msi_v3, 2383 .ping_all_vfs = qm_ping_all_vfs, 2384 .ping_pf = qm_ping_pf, 2385 }; 2386 2387 static void *qm_get_avail_sqe(struct hisi_qp *qp) 2388 { 2389 struct hisi_qp_status *qp_status = &qp->qp_status; 2390 u16 sq_tail = qp_status->sq_tail; 2391 2392 if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1)) 2393 return NULL; 2394 2395 return qp->sqe + sq_tail * qp->qm->sqe_size; 2396 } 2397 2398 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) 2399 { 2400 struct device *dev = &qm->pdev->dev; 2401 struct hisi_qp *qp; 2402 int qp_id; 2403 2404 if (!qm_qp_avail_state(qm, NULL, QP_INIT)) 2405 return ERR_PTR(-EPERM); 2406 2407 if (qm->qp_in_used == qm->qp_num) { 2408 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", 2409 qm->qp_num); 2410 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); 2411 return ERR_PTR(-EBUSY); 2412 } 2413 2414 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); 2415 if (qp_id < 0) { 2416 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", 2417 qm->qp_num); 2418 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); 2419 return ERR_PTR(-EBUSY); 2420 } 2421 2422 qp = &qm->qp_array[qp_id]; 2423 2424 memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); 2425 2426 qp->event_cb = NULL; 2427 qp->req_cb = NULL; 2428 qp->qp_id = qp_id; 2429 qp->alg_type = alg_type; 2430 qp->is_in_kernel = true; 2431 qm->qp_in_used++; 2432 atomic_set(&qp->qp_status.flags, QP_INIT); 2433 2434 return qp; 2435 } 2436 2437 /** 2438 * hisi_qm_create_qp() - Create a queue pair from qm. 2439 * @qm: The qm we create a qp from. 2440 * @alg_type: Accelerator specific algorithm type in sqc. 
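 *
 * Context: takes qm->qps_lock for writing, so it must not be called from
 * atomic context.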
2441 * 2442 * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating 2443 * qp memory fails. 2444 */ 2445 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) 2446 { 2447 struct hisi_qp *qp; 2448 2449 down_write(&qm->qps_lock); 2450 qp = qm_create_qp_nolock(qm, alg_type); 2451 up_write(&qm->qps_lock); 2452 2453 return qp; 2454 } 2455 EXPORT_SYMBOL_GPL(hisi_qm_create_qp); 2456 2457 /** 2458 * hisi_qm_release_qp() - Release a qp back to its qm. 2459 * @qp: The qp we want to release. 2460 * 2461 * This function releases the resource of a qp. 2462 */ 2463 void hisi_qm_release_qp(struct hisi_qp *qp) 2464 { 2465 struct hisi_qm *qm = qp->qm; 2466 2467 down_write(&qm->qps_lock); 2468 2469 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) { 2470 up_write(&qm->qps_lock); 2471 return; 2472 } 2473 2474 qm->qp_in_used--; 2475 idr_remove(&qm->qp_idr, qp->qp_id); 2476 2477 up_write(&qm->qps_lock); 2478 } 2479 EXPORT_SYMBOL_GPL(hisi_qm_release_qp); 2480 2481 static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) 2482 { 2483 struct hisi_qm *qm = qp->qm; 2484 struct device *dev = &qm->pdev->dev; 2485 enum qm_hw_ver ver = qm->ver; 2486 struct qm_sqc *sqc; 2487 dma_addr_t sqc_dma; 2488 int ret; 2489 2490 sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL); 2491 if (!sqc) 2492 return -ENOMEM; 2493 2494 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); 2495 if (ver == QM_HW_V1) { 2496 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); 2497 sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); 2498 } else { 2499 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); 2500 sqc->w8 = 0; /* rand_qc */ 2501 } 2502 sqc->cq_num = cpu_to_le16(qp_id); 2503 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); 2504 2505 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) 2506 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE << 2507 QM_QC_PASID_ENABLE_SHIFT); 2508 2509 sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc), 2510 DMA_TO_DEVICE); 2511 if (dma_mapping_error(dev, sqc_dma)) { 2512 kfree(sqc); 2513 return -ENOMEM; 2514 } 2515 2516 ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); 2517 dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); 2518 kfree(sqc); 2519 2520 return ret; 2521 } 2522 2523 static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) 2524 { 2525 struct hisi_qm *qm = qp->qm; 2526 struct device *dev = &qm->pdev->dev; 2527 enum qm_hw_ver ver = qm->ver; 2528 struct qm_cqc *cqc; 2529 dma_addr_t cqc_dma; 2530 int ret; 2531 2532 cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL); 2533 if (!cqc) 2534 return -ENOMEM; 2535 2536 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); 2537 if (ver == QM_HW_V1) { 2538 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 2539 QM_QC_CQE_SIZE)); 2540 cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); 2541 } else { 2542 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE)); 2543 cqc->w8 = 0; /* rand_qc */ 2544 } 2545 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); 2546 2547 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) 2548 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE); 2549 2550 cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc), 2551 DMA_TO_DEVICE); 2552 if (dma_mapping_error(dev, cqc_dma)) { 2553 kfree(cqc); 2554 return -ENOMEM; 2555 } 2556 2557 ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); 2558 dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); 2559 kfree(cqc); 2560 2561 return ret; 2562 } 2563 2564 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) 2565 
{ 2566 int ret; 2567 2568 qm_init_qp_status(qp); 2569 2570 ret = qm_sq_ctx_cfg(qp, qp_id, pasid); 2571 if (ret) 2572 return ret; 2573 2574 return qm_cq_ctx_cfg(qp, qp_id, pasid); 2575 } 2576 2577 static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) 2578 { 2579 struct hisi_qm *qm = qp->qm; 2580 struct device *dev = &qm->pdev->dev; 2581 int qp_id = qp->qp_id; 2582 u32 pasid = arg; 2583 int ret; 2584 2585 if (!qm_qp_avail_state(qm, qp, QP_START)) 2586 return -EPERM; 2587 2588 ret = qm_qp_ctx_cfg(qp, qp_id, pasid); 2589 if (ret) 2590 return ret; 2591 2592 atomic_set(&qp->qp_status.flags, QP_START); 2593 dev_dbg(dev, "queue %d started\n", qp_id); 2594 2595 return 0; 2596 } 2597 2598 /** 2599 * hisi_qm_start_qp() - Start a qp into running. 2600 * @qp: The qp we want to start to run. 2601 * @arg: Accelerator specific argument. 2602 * 2603 * After this function, qp can receive request from user. Return 0 if 2604 * successful, Return -EBUSY if failed. 2605 */ 2606 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) 2607 { 2608 struct hisi_qm *qm = qp->qm; 2609 int ret; 2610 2611 down_write(&qm->qps_lock); 2612 ret = qm_start_qp_nolock(qp, arg); 2613 up_write(&qm->qps_lock); 2614 2615 return ret; 2616 } 2617 EXPORT_SYMBOL_GPL(hisi_qm_start_qp); 2618 2619 /** 2620 * qp_stop_fail_cb() - call request cb. 2621 * @qp: stopped failed qp. 2622 * 2623 * Callback function should be called whether task completed or not. 2624 */ 2625 static void qp_stop_fail_cb(struct hisi_qp *qp) 2626 { 2627 int qp_used = atomic_read(&qp->qp_status.used); 2628 u16 cur_tail = qp->qp_status.sq_tail; 2629 u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH; 2630 struct hisi_qm *qm = qp->qm; 2631 u16 pos; 2632 int i; 2633 2634 for (i = 0; i < qp_used; i++) { 2635 pos = (i + cur_head) % QM_Q_DEPTH; 2636 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); 2637 atomic_dec(&qp->qp_status.used); 2638 } 2639 } 2640 2641 /** 2642 * qm_drain_qp() - Drain a qp. 2643 * @qp: The qp we want to drain. 2644 * 2645 * Determine whether the queue is cleared by judging the tail pointers of 2646 * sq and cq. 2647 */ 2648 static int qm_drain_qp(struct hisi_qp *qp) 2649 { 2650 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); 2651 struct hisi_qm *qm = qp->qm; 2652 struct device *dev = &qm->pdev->dev; 2653 struct qm_sqc *sqc; 2654 struct qm_cqc *cqc; 2655 dma_addr_t dma_addr; 2656 int ret = 0, i = 0; 2657 void *addr; 2658 2659 /* No need to judge if master OOO is blocked. 
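 * If the controller has already taken an error that shuts down the
 * master OOO path, the queue cannot make progress anyway and the
 * following reset flow is expected to clean it up, so report success.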
*/ 2660 if (qm_check_dev_error(qm)) 2661 return 0; 2662 2663 /* Kunpeng930 supports drain qp by device */ 2664 if (qm->ops->stop_qp) { 2665 ret = qm->ops->stop_qp(qp); 2666 if (ret) 2667 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); 2668 return ret; 2669 } 2670 2671 addr = qm_ctx_alloc(qm, size, &dma_addr); 2672 if (IS_ERR(addr)) { 2673 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); 2674 return -ENOMEM; 2675 } 2676 2677 while (++i) { 2678 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); 2679 if (ret) { 2680 dev_err_ratelimited(dev, "Failed to dump sqc!\n"); 2681 break; 2682 } 2683 sqc = addr; 2684 2685 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), 2686 qp->qp_id); 2687 if (ret) { 2688 dev_err_ratelimited(dev, "Failed to dump cqc!\n"); 2689 break; 2690 } 2691 cqc = addr + sizeof(struct qm_sqc); 2692 2693 if ((sqc->tail == cqc->tail) && 2694 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) 2695 break; 2696 2697 if (i == MAX_WAIT_COUNTS) { 2698 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); 2699 ret = -EBUSY; 2700 break; 2701 } 2702 2703 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); 2704 } 2705 2706 qm_ctx_free(qm, size, addr, &dma_addr); 2707 2708 return ret; 2709 } 2710 2711 static int qm_stop_qp_nolock(struct hisi_qp *qp) 2712 { 2713 struct device *dev = &qp->qm->pdev->dev; 2714 int ret; 2715 2716 /* 2717 * It is allowed to stop and release qp when reset, If the qp is 2718 * stopped when reset but still want to be released then, the 2719 * is_resetting flag should be set negative so that this qp will not 2720 * be restarted after reset. 2721 */ 2722 if (atomic_read(&qp->qp_status.flags) == QP_STOP) { 2723 qp->is_resetting = false; 2724 return 0; 2725 } 2726 2727 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) 2728 return -EPERM; 2729 2730 atomic_set(&qp->qp_status.flags, QP_STOP); 2731 2732 ret = qm_drain_qp(qp); 2733 if (ret) 2734 dev_err(dev, "Failed to drain out data for stopping!\n"); 2735 2736 if (qp->qm->wq) 2737 flush_workqueue(qp->qm->wq); 2738 else 2739 flush_work(&qp->qm->work); 2740 2741 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) 2742 qp_stop_fail_cb(qp); 2743 2744 dev_dbg(dev, "stop queue %u!", qp->qp_id); 2745 2746 return 0; 2747 } 2748 2749 /** 2750 * hisi_qm_stop_qp() - Stop a qp in qm. 2751 * @qp: The qp we want to stop. 2752 * 2753 * This function is reverse of hisi_qm_start_qp. Return 0 if successful. 2754 */ 2755 int hisi_qm_stop_qp(struct hisi_qp *qp) 2756 { 2757 int ret; 2758 2759 down_write(&qp->qm->qps_lock); 2760 ret = qm_stop_qp_nolock(qp); 2761 up_write(&qp->qm->qps_lock); 2762 2763 return ret; 2764 } 2765 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); 2766 2767 /** 2768 * hisi_qp_send() - Queue up a task in the hardware queue. 2769 * @qp: The qp in which to put the message. 2770 * @msg: The message. 2771 * 2772 * This function will return -EBUSY if qp is currently full, and -EAGAIN 2773 * if qp related qm is resetting. 2774 * 2775 * Note: This function may run with qm_irq_thread and ACC reset at same time. 2776 * It has no race with qm_irq_thread. However, during hisi_qp_send, ACC 2777 * reset may happen, we have no lock here considering performance. This 2778 * causes current qm_db sending fail or can not receive sended sqe. QM 2779 * sync/async receive function should handle the error sqe. ACC reset 2780 * done function should clear used sqe to 0. 
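 *
 * A minimal submission path, as a rough sketch (illustrative only; the
 * sqe buffer, the alg_type value and the error label are assumptions of
 * the caller):
 *
 *	qp = hisi_qm_create_qp(qm, alg_type);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	ret = hisi_qm_start_qp(qp, 0);
 *	if (ret)
 *		goto err_release_qp;
 *	ret = hisi_qp_send(qp, sqe);
 *
 * -EBUSY means the queue is full and the caller may retry later; -EAGAIN
 * means the qm is stopping or resetting and the request should be failed
 * or re-queued by the caller.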
2781 */ 2782 int hisi_qp_send(struct hisi_qp *qp, const void *msg) 2783 { 2784 struct hisi_qp_status *qp_status = &qp->qp_status; 2785 u16 sq_tail = qp_status->sq_tail; 2786 u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH; 2787 void *sqe = qm_get_avail_sqe(qp); 2788 2789 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || 2790 atomic_read(&qp->qm->status.flags) == QM_STOP || 2791 qp->is_resetting)) { 2792 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); 2793 return -EAGAIN; 2794 } 2795 2796 if (!sqe) 2797 return -EBUSY; 2798 2799 memcpy(sqe, msg, qp->qm->sqe_size); 2800 2801 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); 2802 atomic_inc(&qp->qp_status.used); 2803 qp_status->sq_tail = sq_tail_next; 2804 2805 return 0; 2806 } 2807 EXPORT_SYMBOL_GPL(hisi_qp_send); 2808 2809 static void hisi_qm_cache_wb(struct hisi_qm *qm) 2810 { 2811 unsigned int val; 2812 2813 if (qm->ver == QM_HW_V1) 2814 return; 2815 2816 writel(0x1, qm->io_base + QM_CACHE_WB_START); 2817 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, 2818 val, val & BIT(0), POLL_PERIOD, 2819 POLL_TIMEOUT)) 2820 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); 2821 } 2822 2823 static void qm_qp_event_notifier(struct hisi_qp *qp) 2824 { 2825 wake_up_interruptible(&qp->uacce_q->wait); 2826 } 2827 2828 static int hisi_qm_get_available_instances(struct uacce_device *uacce) 2829 { 2830 return hisi_qm_get_free_qp_num(uacce->priv); 2831 } 2832 2833 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, 2834 unsigned long arg, 2835 struct uacce_queue *q) 2836 { 2837 struct hisi_qm *qm = uacce->priv; 2838 struct hisi_qp *qp; 2839 u8 alg_type = 0; 2840 2841 qp = hisi_qm_create_qp(qm, alg_type); 2842 if (IS_ERR(qp)) 2843 return PTR_ERR(qp); 2844 2845 q->priv = qp; 2846 q->uacce = uacce; 2847 qp->uacce_q = q; 2848 qp->event_cb = qm_qp_event_notifier; 2849 qp->pasid = arg; 2850 qp->is_in_kernel = false; 2851 2852 return 0; 2853 } 2854 2855 static void hisi_qm_uacce_put_queue(struct uacce_queue *q) 2856 { 2857 struct hisi_qp *qp = q->priv; 2858 2859 hisi_qm_cache_wb(qp->qm); 2860 hisi_qm_release_qp(qp); 2861 } 2862 2863 /* map sq/cq/doorbell to user space */ 2864 static int hisi_qm_uacce_mmap(struct uacce_queue *q, 2865 struct vm_area_struct *vma, 2866 struct uacce_qfile_region *qfr) 2867 { 2868 struct hisi_qp *qp = q->priv; 2869 struct hisi_qm *qm = qp->qm; 2870 resource_size_t phys_base = qm->db_phys_base + 2871 qp->qp_id * qm->db_interval; 2872 size_t sz = vma->vm_end - vma->vm_start; 2873 struct pci_dev *pdev = qm->pdev; 2874 struct device *dev = &pdev->dev; 2875 unsigned long vm_pgoff; 2876 int ret; 2877 2878 switch (qfr->type) { 2879 case UACCE_QFRT_MMIO: 2880 if (qm->ver == QM_HW_V1) { 2881 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) 2882 return -EINVAL; 2883 } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) { 2884 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + 2885 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) 2886 return -EINVAL; 2887 } else { 2888 if (sz > qm->db_interval) 2889 return -EINVAL; 2890 } 2891 2892 vma->vm_flags |= VM_IO; 2893 2894 return remap_pfn_range(vma, vma->vm_start, 2895 phys_base >> PAGE_SHIFT, 2896 sz, pgprot_noncached(vma->vm_page_prot)); 2897 case UACCE_QFRT_DUS: 2898 if (sz != qp->qdma.size) 2899 return -EINVAL; 2900 2901 /* 2902 * dma_mmap_coherent() requires vm_pgoff as 0 2903 * restore vm_pfoff to initial value for mmap() 2904 */ 2905 vm_pgoff = vma->vm_pgoff; 2906 vma->vm_pgoff = 0; 2907 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, 2908 
qp->qdma.dma, sz); 2909 vma->vm_pgoff = vm_pgoff; 2910 return ret; 2911 2912 default: 2913 return -EINVAL; 2914 } 2915 } 2916 2917 static int hisi_qm_uacce_start_queue(struct uacce_queue *q) 2918 { 2919 struct hisi_qp *qp = q->priv; 2920 2921 return hisi_qm_start_qp(qp, qp->pasid); 2922 } 2923 2924 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) 2925 { 2926 hisi_qm_stop_qp(q->priv); 2927 } 2928 2929 static int hisi_qm_is_q_updated(struct uacce_queue *q) 2930 { 2931 struct hisi_qp *qp = q->priv; 2932 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 2933 int updated = 0; 2934 2935 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 2936 /* make sure to read data from memory */ 2937 dma_rmb(); 2938 qm_cq_head_update(qp); 2939 cqe = qp->cqe + qp->qp_status.cq_head; 2940 updated = 1; 2941 } 2942 2943 return updated; 2944 } 2945 2946 static void qm_set_sqctype(struct uacce_queue *q, u16 type) 2947 { 2948 struct hisi_qm *qm = q->uacce->priv; 2949 struct hisi_qp *qp = q->priv; 2950 2951 down_write(&qm->qps_lock); 2952 qp->alg_type = type; 2953 up_write(&qm->qps_lock); 2954 } 2955 2956 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, 2957 unsigned long arg) 2958 { 2959 struct hisi_qp *qp = q->priv; 2960 struct hisi_qp_ctx qp_ctx; 2961 2962 if (cmd == UACCE_CMD_QM_SET_QP_CTX) { 2963 if (copy_from_user(&qp_ctx, (void __user *)arg, 2964 sizeof(struct hisi_qp_ctx))) 2965 return -EFAULT; 2966 2967 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) 2968 return -EINVAL; 2969 2970 qm_set_sqctype(q, qp_ctx.qc_type); 2971 qp_ctx.id = qp->qp_id; 2972 2973 if (copy_to_user((void __user *)arg, &qp_ctx, 2974 sizeof(struct hisi_qp_ctx))) 2975 return -EFAULT; 2976 } else { 2977 return -EINVAL; 2978 } 2979 2980 return 0; 2981 } 2982 2983 static const struct uacce_ops uacce_qm_ops = { 2984 .get_available_instances = hisi_qm_get_available_instances, 2985 .get_queue = hisi_qm_uacce_get_queue, 2986 .put_queue = hisi_qm_uacce_put_queue, 2987 .start_queue = hisi_qm_uacce_start_queue, 2988 .stop_queue = hisi_qm_uacce_stop_queue, 2989 .mmap = hisi_qm_uacce_mmap, 2990 .ioctl = hisi_qm_uacce_ioctl, 2991 .is_q_updated = hisi_qm_is_q_updated, 2992 }; 2993 2994 static int qm_alloc_uacce(struct hisi_qm *qm) 2995 { 2996 struct pci_dev *pdev = qm->pdev; 2997 struct uacce_device *uacce; 2998 unsigned long mmio_page_nr; 2999 unsigned long dus_page_nr; 3000 struct uacce_interface interface = { 3001 .flags = UACCE_DEV_SVA, 3002 .ops = &uacce_qm_ops, 3003 }; 3004 int ret; 3005 3006 ret = strscpy(interface.name, pdev->driver->name, 3007 sizeof(interface.name)); 3008 if (ret < 0) 3009 return -ENAMETOOLONG; 3010 3011 uacce = uacce_alloc(&pdev->dev, &interface); 3012 if (IS_ERR(uacce)) 3013 return PTR_ERR(uacce); 3014 3015 if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) { 3016 qm->use_sva = true; 3017 } else { 3018 /* only consider sva case */ 3019 uacce_remove(uacce); 3020 qm->uacce = NULL; 3021 return -EINVAL; 3022 } 3023 3024 uacce->is_vf = pdev->is_virtfn; 3025 uacce->priv = qm; 3026 uacce->algs = qm->algs; 3027 3028 if (qm->ver == QM_HW_V1) 3029 uacce->api_ver = HISI_QM_API_VER_BASE; 3030 else if (qm->ver == QM_HW_V2) 3031 uacce->api_ver = HISI_QM_API_VER2_BASE; 3032 else 3033 uacce->api_ver = HISI_QM_API_VER3_BASE; 3034 3035 if (qm->ver == QM_HW_V1) 3036 mmio_page_nr = QM_DOORBELL_PAGE_NR; 3037 else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) 3038 mmio_page_nr = QM_DOORBELL_PAGE_NR + 3039 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; 3040 else 3041 mmio_page_nr = qm->db_interval 
/ PAGE_SIZE; 3042 3043 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + 3044 sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT; 3045 3046 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; 3047 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; 3048 3049 qm->uacce = uacce; 3050 3051 return 0; 3052 } 3053 3054 /** 3055 * qm_frozen() - Try to froze QM to cut continuous queue request. If 3056 * there is user on the QM, return failure without doing anything. 3057 * @qm: The qm needed to be fronzen. 3058 * 3059 * This function frozes QM, then we can do SRIOV disabling. 3060 */ 3061 static int qm_frozen(struct hisi_qm *qm) 3062 { 3063 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) 3064 return 0; 3065 3066 down_write(&qm->qps_lock); 3067 3068 if (!qm->qp_in_used) { 3069 qm->qp_in_used = qm->qp_num; 3070 up_write(&qm->qps_lock); 3071 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); 3072 return 0; 3073 } 3074 3075 up_write(&qm->qps_lock); 3076 3077 return -EBUSY; 3078 } 3079 3080 static int qm_try_frozen_vfs(struct pci_dev *pdev, 3081 struct hisi_qm_list *qm_list) 3082 { 3083 struct hisi_qm *qm, *vf_qm; 3084 struct pci_dev *dev; 3085 int ret = 0; 3086 3087 if (!qm_list || !pdev) 3088 return -EINVAL; 3089 3090 /* Try to frozen all the VFs as disable SRIOV */ 3091 mutex_lock(&qm_list->lock); 3092 list_for_each_entry(qm, &qm_list->list, list) { 3093 dev = qm->pdev; 3094 if (dev == pdev) 3095 continue; 3096 if (pci_physfn(dev) == pdev) { 3097 vf_qm = pci_get_drvdata(dev); 3098 ret = qm_frozen(vf_qm); 3099 if (ret) 3100 goto frozen_fail; 3101 } 3102 } 3103 3104 frozen_fail: 3105 mutex_unlock(&qm_list->lock); 3106 3107 return ret; 3108 } 3109 3110 /** 3111 * hisi_qm_wait_task_finish() - Wait until the task is finished 3112 * when removing the driver. 3113 * @qm: The qm needed to wait for the task to finish. 3114 * @qm_list: The list of all available devices. 3115 */ 3116 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 3117 { 3118 while (qm_frozen(qm) || 3119 ((qm->fun_type == QM_HW_PF) && 3120 qm_try_frozen_vfs(qm->pdev, qm_list))) { 3121 msleep(WAIT_PERIOD); 3122 } 3123 3124 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || 3125 test_bit(QM_RESETTING, &qm->misc_ctl)) 3126 msleep(WAIT_PERIOD); 3127 3128 udelay(REMOVE_WAIT_DELAY); 3129 } 3130 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); 3131 3132 /** 3133 * hisi_qm_get_free_qp_num() - Get free number of qp in qm. 3134 * @qm: The qm which want to get free qp. 3135 * 3136 * This function return free number of qp in qm. 
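 * The value is sampled under qm->qps_lock and may already be stale when
 * the caller acts on it, so treat it as a hint rather than a guarantee.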
3137 */ 3138 int hisi_qm_get_free_qp_num(struct hisi_qm *qm) 3139 { 3140 int ret; 3141 3142 down_read(&qm->qps_lock); 3143 ret = qm->qp_num - qm->qp_in_used; 3144 up_read(&qm->qps_lock); 3145 3146 return ret; 3147 } 3148 EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); 3149 3150 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) 3151 { 3152 struct device *dev = &qm->pdev->dev; 3153 struct qm_dma *qdma; 3154 int i; 3155 3156 for (i = num - 1; i >= 0; i--) { 3157 qdma = &qm->qp_array[i].qdma; 3158 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); 3159 } 3160 3161 kfree(qm->qp_array); 3162 } 3163 3164 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) 3165 { 3166 struct device *dev = &qm->pdev->dev; 3167 size_t off = qm->sqe_size * QM_Q_DEPTH; 3168 struct hisi_qp *qp; 3169 3170 qp = &qm->qp_array[id]; 3171 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, 3172 GFP_KERNEL); 3173 if (!qp->qdma.va) 3174 return -ENOMEM; 3175 3176 qp->sqe = qp->qdma.va; 3177 qp->sqe_dma = qp->qdma.dma; 3178 qp->cqe = qp->qdma.va + off; 3179 qp->cqe_dma = qp->qdma.dma + off; 3180 qp->qdma.size = dma_size; 3181 qp->qm = qm; 3182 qp->qp_id = id; 3183 3184 return 0; 3185 } 3186 3187 static void hisi_qm_pre_init(struct hisi_qm *qm) 3188 { 3189 struct pci_dev *pdev = qm->pdev; 3190 3191 if (qm->ver == QM_HW_V1) 3192 qm->ops = &qm_hw_ops_v1; 3193 else if (qm->ver == QM_HW_V2) 3194 qm->ops = &qm_hw_ops_v2; 3195 else 3196 qm->ops = &qm_hw_ops_v3; 3197 3198 pci_set_drvdata(pdev, qm); 3199 mutex_init(&qm->mailbox_lock); 3200 init_rwsem(&qm->qps_lock); 3201 qm->qp_in_used = 0; 3202 qm->misc_ctl = false; 3203 } 3204 3205 static void qm_cmd_uninit(struct hisi_qm *qm) 3206 { 3207 u32 val; 3208 3209 if (qm->ver < QM_HW_V3) 3210 return; 3211 3212 val = readl(qm->io_base + QM_IFC_INT_MASK); 3213 val |= QM_IFC_INT_DISABLE; 3214 writel(val, qm->io_base + QM_IFC_INT_MASK); 3215 } 3216 3217 static void qm_cmd_init(struct hisi_qm *qm) 3218 { 3219 u32 val; 3220 3221 if (qm->ver < QM_HW_V3) 3222 return; 3223 3224 /* Clear communication interrupt source */ 3225 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); 3226 3227 /* Enable pf to vf communication reg. */ 3228 val = readl(qm->io_base + QM_IFC_INT_MASK); 3229 val &= ~QM_IFC_INT_DISABLE; 3230 writel(val, qm->io_base + QM_IFC_INT_MASK); 3231 } 3232 3233 static void qm_put_pci_res(struct hisi_qm *qm) 3234 { 3235 struct pci_dev *pdev = qm->pdev; 3236 3237 if (qm->use_db_isolation) 3238 iounmap(qm->db_io_base); 3239 3240 iounmap(qm->io_base); 3241 pci_release_mem_regions(pdev); 3242 } 3243 3244 static void hisi_qm_pci_uninit(struct hisi_qm *qm) 3245 { 3246 struct pci_dev *pdev = qm->pdev; 3247 3248 pci_free_irq_vectors(pdev); 3249 qm_put_pci_res(qm); 3250 pci_disable_device(pdev); 3251 } 3252 3253 /** 3254 * hisi_qm_uninit() - Uninitialize qm. 3255 * @qm: The qm needed uninit. 3256 * 3257 * This function uninits qm related device resources. 
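 * If the qm is not in a state that allows QM_CLOSE (for example it is
 * still started), the function returns without releasing anything.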
3258 */ 3259 void hisi_qm_uninit(struct hisi_qm *qm) 3260 { 3261 struct pci_dev *pdev = qm->pdev; 3262 struct device *dev = &pdev->dev; 3263 3264 qm_cmd_uninit(qm); 3265 kfree(qm->factor); 3266 down_write(&qm->qps_lock); 3267 3268 if (!qm_avail_state(qm, QM_CLOSE)) { 3269 up_write(&qm->qps_lock); 3270 return; 3271 } 3272 3273 hisi_qp_memory_uninit(qm, qm->qp_num); 3274 idr_destroy(&qm->qp_idr); 3275 3276 if (qm->qdma.va) { 3277 hisi_qm_cache_wb(qm); 3278 dma_free_coherent(dev, qm->qdma.size, 3279 qm->qdma.va, qm->qdma.dma); 3280 } 3281 3282 qm_irq_unregister(qm); 3283 hisi_qm_pci_uninit(qm); 3284 uacce_remove(qm->uacce); 3285 qm->uacce = NULL; 3286 3287 up_write(&qm->qps_lock); 3288 } 3289 EXPORT_SYMBOL_GPL(hisi_qm_uninit); 3290 3291 /** 3292 * hisi_qm_get_vft() - Get vft from a qm. 3293 * @qm: The qm we want to get its vft. 3294 * @base: The base number of queue in vft. 3295 * @number: The number of queues in vft. 3296 * 3297 * We can allocate multiple queues to a qm by configuring virtual function 3298 * table. We get related configures by this function. Normally, we call this 3299 * function in VF driver to get the queue information. 3300 * 3301 * qm hw v1 does not support this interface. 3302 */ 3303 int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) 3304 { 3305 if (!base || !number) 3306 return -EINVAL; 3307 3308 if (!qm->ops->get_vft) { 3309 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); 3310 return -EINVAL; 3311 } 3312 3313 return qm->ops->get_vft(qm, base, number); 3314 } 3315 EXPORT_SYMBOL_GPL(hisi_qm_get_vft); 3316 3317 /** 3318 * hisi_qm_set_vft() - Set vft to a qm. 3319 * @qm: The qm we want to set its vft. 3320 * @fun_num: The function number. 3321 * @base: The base number of queue in vft. 3322 * @number: The number of queues in vft. 3323 * 3324 * This function is alway called in PF driver, it is used to assign queues 3325 * among PF and VFs. 
3326 * 3327 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) 3328 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) 3329 * (VF function number 0x2) 3330 */ 3331 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 3332 u32 number) 3333 { 3334 u32 max_q_num = qm->ctrl_qp_num; 3335 3336 if (base >= max_q_num || number > max_q_num || 3337 (base + number) > max_q_num) 3338 return -EINVAL; 3339 3340 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); 3341 } 3342 3343 static void qm_init_eq_aeq_status(struct hisi_qm *qm) 3344 { 3345 struct hisi_qm_status *status = &qm->status; 3346 3347 status->eq_head = 0; 3348 status->aeq_head = 0; 3349 status->eqc_phase = true; 3350 status->aeqc_phase = true; 3351 } 3352 3353 static int qm_eq_ctx_cfg(struct hisi_qm *qm) 3354 { 3355 struct device *dev = &qm->pdev->dev; 3356 struct qm_eqc *eqc; 3357 dma_addr_t eqc_dma; 3358 int ret; 3359 3360 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); 3361 if (!eqc) 3362 return -ENOMEM; 3363 3364 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); 3365 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); 3366 if (qm->ver == QM_HW_V1) 3367 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); 3368 eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3369 3370 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), 3371 DMA_TO_DEVICE); 3372 if (dma_mapping_error(dev, eqc_dma)) { 3373 kfree(eqc); 3374 return -ENOMEM; 3375 } 3376 3377 ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); 3378 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); 3379 kfree(eqc); 3380 3381 return ret; 3382 } 3383 3384 static int qm_aeq_ctx_cfg(struct hisi_qm *qm) 3385 { 3386 struct device *dev = &qm->pdev->dev; 3387 struct qm_aeqc *aeqc; 3388 dma_addr_t aeqc_dma; 3389 int ret; 3390 3391 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL); 3392 if (!aeqc) 3393 return -ENOMEM; 3394 3395 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); 3396 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); 3397 aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3398 3399 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), 3400 DMA_TO_DEVICE); 3401 if (dma_mapping_error(dev, aeqc_dma)) { 3402 kfree(aeqc); 3403 return -ENOMEM; 3404 } 3405 3406 ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); 3407 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); 3408 kfree(aeqc); 3409 3410 return ret; 3411 } 3412 3413 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) 3414 { 3415 struct device *dev = &qm->pdev->dev; 3416 int ret; 3417 3418 qm_init_eq_aeq_status(qm); 3419 3420 ret = qm_eq_ctx_cfg(qm); 3421 if (ret) { 3422 dev_err(dev, "Set eqc failed!\n"); 3423 return ret; 3424 } 3425 3426 return qm_aeq_ctx_cfg(qm); 3427 } 3428 3429 static int __hisi_qm_start(struct hisi_qm *qm) 3430 { 3431 int ret; 3432 3433 WARN_ON(!qm->qdma.va); 3434 3435 if (qm->fun_type == QM_HW_PF) { 3436 ret = qm_dev_mem_reset(qm); 3437 if (ret) 3438 return ret; 3439 3440 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); 3441 if (ret) 3442 return ret; 3443 } 3444 3445 ret = qm_eq_aeq_ctx_cfg(qm); 3446 if (ret) 3447 return ret; 3448 3449 ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); 3450 if (ret) 3451 return ret; 3452 3453 ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); 3454 if (ret) 3455 return ret; 3456 3457 qm_init_prefetch(qm); 3458 3459 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); 3460 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); 3461 3462 return 0; 
3463 } 3464 3465 /** 3466 * hisi_qm_start() - start qm 3467 * @qm: The qm to be started. 3468 * 3469 * This function starts a qm, then we can allocate qp from this qm. 3470 */ 3471 int hisi_qm_start(struct hisi_qm *qm) 3472 { 3473 struct device *dev = &qm->pdev->dev; 3474 int ret = 0; 3475 3476 down_write(&qm->qps_lock); 3477 3478 if (!qm_avail_state(qm, QM_START)) { 3479 up_write(&qm->qps_lock); 3480 return -EPERM; 3481 } 3482 3483 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); 3484 3485 if (!qm->qp_num) { 3486 dev_err(dev, "qp_num should not be 0\n"); 3487 ret = -EINVAL; 3488 goto err_unlock; 3489 } 3490 3491 ret = __hisi_qm_start(qm); 3492 if (!ret) 3493 atomic_set(&qm->status.flags, QM_START); 3494 3495 err_unlock: 3496 up_write(&qm->qps_lock); 3497 return ret; 3498 } 3499 EXPORT_SYMBOL_GPL(hisi_qm_start); 3500 3501 static int qm_restart(struct hisi_qm *qm) 3502 { 3503 struct device *dev = &qm->pdev->dev; 3504 struct hisi_qp *qp; 3505 int ret, i; 3506 3507 ret = hisi_qm_start(qm); 3508 if (ret < 0) 3509 return ret; 3510 3511 down_write(&qm->qps_lock); 3512 for (i = 0; i < qm->qp_num; i++) { 3513 qp = &qm->qp_array[i]; 3514 if (atomic_read(&qp->qp_status.flags) == QP_STOP && 3515 qp->is_resetting == true) { 3516 ret = qm_start_qp_nolock(qp, 0); 3517 if (ret < 0) { 3518 dev_err(dev, "Failed to start qp%d!\n", i); 3519 3520 up_write(&qm->qps_lock); 3521 return ret; 3522 } 3523 qp->is_resetting = false; 3524 } 3525 } 3526 up_write(&qm->qps_lock); 3527 3528 return 0; 3529 } 3530 3531 /* Stop started qps in reset flow */ 3532 static int qm_stop_started_qp(struct hisi_qm *qm) 3533 { 3534 struct device *dev = &qm->pdev->dev; 3535 struct hisi_qp *qp; 3536 int i, ret; 3537 3538 for (i = 0; i < qm->qp_num; i++) { 3539 qp = &qm->qp_array[i]; 3540 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { 3541 qp->is_resetting = true; 3542 ret = qm_stop_qp_nolock(qp); 3543 if (ret < 0) { 3544 dev_err(dev, "Failed to stop qp%d!\n", i); 3545 return ret; 3546 } 3547 } 3548 } 3549 3550 return 0; 3551 } 3552 3553 3554 /** 3555 * qm_clear_queues() - Clear all queues memory in a qm. 3556 * @qm: The qm in which the queues will be cleared. 3557 * 3558 * This function clears all queues memory in a qm. Reset of accelerator can 3559 * use this to clear queues. 3560 */ 3561 static void qm_clear_queues(struct hisi_qm *qm) 3562 { 3563 struct hisi_qp *qp; 3564 int i; 3565 3566 for (i = 0; i < qm->qp_num; i++) { 3567 qp = &qm->qp_array[i]; 3568 if (qp->is_resetting) 3569 memset(qp->qdma.va, 0, qp->qdma.size); 3570 } 3571 3572 memset(qm->qdma.va, 0, qm->qdma.size); 3573 } 3574 3575 /** 3576 * hisi_qm_stop() - Stop a qm. 3577 * @qm: The qm which will be stopped. 3578 * @r: The reason to stop qm. 3579 * 3580 * This function stops qm and its qps, then qm can not accept request. 3581 * Related resources are not released at this state, we can use hisi_qm_start 3582 * to let qm start again. 
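 * When @r is QM_SOFT_RESET or QM_FLR, every started qp is stopped first
 * and marked as resetting so that qm_restart() can bring it back up once
 * the reset completes.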
3583 */ 3584 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) 3585 { 3586 struct device *dev = &qm->pdev->dev; 3587 int ret = 0; 3588 3589 down_write(&qm->qps_lock); 3590 3591 qm->status.stop_reason = r; 3592 if (!qm_avail_state(qm, QM_STOP)) { 3593 ret = -EPERM; 3594 goto err_unlock; 3595 } 3596 3597 if (qm->status.stop_reason == QM_SOFT_RESET || 3598 qm->status.stop_reason == QM_FLR) { 3599 ret = qm_stop_started_qp(qm); 3600 if (ret < 0) { 3601 dev_err(dev, "Failed to stop started qp!\n"); 3602 goto err_unlock; 3603 } 3604 } 3605 3606 /* Mask eq and aeq irq */ 3607 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); 3608 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); 3609 3610 if (qm->fun_type == QM_HW_PF) { 3611 ret = hisi_qm_set_vft(qm, 0, 0, 0); 3612 if (ret < 0) { 3613 dev_err(dev, "Failed to set vft!\n"); 3614 ret = -EBUSY; 3615 goto err_unlock; 3616 } 3617 } 3618 3619 qm_clear_queues(qm); 3620 atomic_set(&qm->status.flags, QM_STOP); 3621 3622 err_unlock: 3623 up_write(&qm->qps_lock); 3624 return ret; 3625 } 3626 EXPORT_SYMBOL_GPL(hisi_qm_stop); 3627 3628 static ssize_t qm_status_read(struct file *filp, char __user *buffer, 3629 size_t count, loff_t *pos) 3630 { 3631 struct hisi_qm *qm = filp->private_data; 3632 char buf[QM_DBG_READ_LEN]; 3633 int val, len; 3634 3635 val = atomic_read(&qm->status.flags); 3636 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); 3637 3638 return simple_read_from_buffer(buffer, count, pos, buf, len); 3639 } 3640 3641 static const struct file_operations qm_status_fops = { 3642 .owner = THIS_MODULE, 3643 .open = simple_open, 3644 .read = qm_status_read, 3645 }; 3646 3647 static int qm_debugfs_atomic64_set(void *data, u64 val) 3648 { 3649 if (val) 3650 return -EINVAL; 3651 3652 atomic64_set((atomic64_t *)data, 0); 3653 3654 return 0; 3655 } 3656 3657 static int qm_debugfs_atomic64_get(void *data, u64 *val) 3658 { 3659 *val = atomic64_read((atomic64_t *)data); 3660 3661 return 0; 3662 } 3663 3664 DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get, 3665 qm_debugfs_atomic64_set, "%llu\n"); 3666 3667 static void qm_hw_error_init(struct hisi_qm *qm) 3668 { 3669 struct hisi_qm_err_info *err_info = &qm->err_info; 3670 3671 if (!qm->ops->hw_error_init) { 3672 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); 3673 return; 3674 } 3675 3676 qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe); 3677 } 3678 3679 static void qm_hw_error_uninit(struct hisi_qm *qm) 3680 { 3681 if (!qm->ops->hw_error_uninit) { 3682 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); 3683 return; 3684 } 3685 3686 qm->ops->hw_error_uninit(qm); 3687 } 3688 3689 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) 3690 { 3691 if (!qm->ops->hw_error_handle) { 3692 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); 3693 return ACC_ERR_NONE; 3694 } 3695 3696 return qm->ops->hw_error_handle(qm); 3697 } 3698 3699 /** 3700 * hisi_qm_dev_err_init() - Initialize device error configuration. 3701 * @qm: The qm for which we want to do error initialization. 3702 * 3703 * Initialize QM and device error related configuration. 
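 * This is a no-op on a VF; the PF configures QM error reporting and then
 * calls the device's hw_err_enable() hook.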
3704 */ 3705 void hisi_qm_dev_err_init(struct hisi_qm *qm) 3706 { 3707 if (qm->fun_type == QM_HW_VF) 3708 return; 3709 3710 qm_hw_error_init(qm); 3711 3712 if (!qm->err_ini->hw_err_enable) { 3713 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); 3714 return; 3715 } 3716 qm->err_ini->hw_err_enable(qm); 3717 } 3718 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); 3719 3720 /** 3721 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. 3722 * @qm: The qm for which we want to do error uninitialization. 3723 * 3724 * Uninitialize QM and device error related configuration. 3725 */ 3726 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) 3727 { 3728 if (qm->fun_type == QM_HW_VF) 3729 return; 3730 3731 qm_hw_error_uninit(qm); 3732 3733 if (!qm->err_ini->hw_err_disable) { 3734 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); 3735 return; 3736 } 3737 qm->err_ini->hw_err_disable(qm); 3738 } 3739 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); 3740 3741 /** 3742 * hisi_qm_free_qps() - free multiple queue pairs. 3743 * @qps: The queue pairs need to be freed. 3744 * @qp_num: The num of queue pairs. 3745 */ 3746 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) 3747 { 3748 int i; 3749 3750 if (!qps || qp_num <= 0) 3751 return; 3752 3753 for (i = qp_num - 1; i >= 0; i--) 3754 hisi_qm_release_qp(qps[i]); 3755 } 3756 EXPORT_SYMBOL_GPL(hisi_qm_free_qps); 3757 3758 static void free_list(struct list_head *head) 3759 { 3760 struct hisi_qm_resource *res, *tmp; 3761 3762 list_for_each_entry_safe(res, tmp, head, list) { 3763 list_del(&res->list); 3764 kfree(res); 3765 } 3766 } 3767 3768 static int hisi_qm_sort_devices(int node, struct list_head *head, 3769 struct hisi_qm_list *qm_list) 3770 { 3771 struct hisi_qm_resource *res, *tmp; 3772 struct hisi_qm *qm; 3773 struct list_head *n; 3774 struct device *dev; 3775 int dev_node = 0; 3776 3777 list_for_each_entry(qm, &qm_list->list, list) { 3778 dev = &qm->pdev->dev; 3779 3780 if (IS_ENABLED(CONFIG_NUMA)) { 3781 dev_node = dev_to_node(dev); 3782 if (dev_node < 0) 3783 dev_node = 0; 3784 } 3785 3786 res = kzalloc(sizeof(*res), GFP_KERNEL); 3787 if (!res) 3788 return -ENOMEM; 3789 3790 res->qm = qm; 3791 res->distance = node_distance(dev_node, node); 3792 n = head; 3793 list_for_each_entry(tmp, head, list) { 3794 if (res->distance < tmp->distance) { 3795 n = &tmp->list; 3796 break; 3797 } 3798 } 3799 list_add_tail(&res->list, n); 3800 } 3801 3802 return 0; 3803 } 3804 3805 /** 3806 * hisi_qm_alloc_qps_node() - Create multiple queue pairs. 3807 * @qm_list: The list of all available devices. 3808 * @qp_num: The number of queue pairs need created. 3809 * @alg_type: The algorithm type. 3810 * @node: The numa node. 3811 * @qps: The queue pairs need created. 3812 * 3813 * This function will sort all available device according to numa distance. 3814 * Then try to create all queue pairs from one device, if all devices do 3815 * not meet the requirements will return error. 
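 *
 * A rough usage sketch (illustrative; the qm_list pointer and the
 * requested queue count are assumptions of the example):
 *
 *	struct hisi_qp *qps[2];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(&my_qm_list, ARRAY_SIZE(qps), 0,
 *				     numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, ARRAY_SIZE(qps));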
3816 */ 3817 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, 3818 u8 alg_type, int node, struct hisi_qp **qps) 3819 { 3820 struct hisi_qm_resource *tmp; 3821 int ret = -ENODEV; 3822 LIST_HEAD(head); 3823 int i; 3824 3825 if (!qps || !qm_list || qp_num <= 0) 3826 return -EINVAL; 3827 3828 mutex_lock(&qm_list->lock); 3829 if (hisi_qm_sort_devices(node, &head, qm_list)) { 3830 mutex_unlock(&qm_list->lock); 3831 goto err; 3832 } 3833 3834 list_for_each_entry(tmp, &head, list) { 3835 for (i = 0; i < qp_num; i++) { 3836 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); 3837 if (IS_ERR(qps[i])) { 3838 hisi_qm_free_qps(qps, i); 3839 break; 3840 } 3841 } 3842 3843 if (i == qp_num) { 3844 ret = 0; 3845 break; 3846 } 3847 } 3848 3849 mutex_unlock(&qm_list->lock); 3850 if (ret) 3851 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", 3852 node, alg_type, qp_num); 3853 3854 err: 3855 free_list(&head); 3856 return ret; 3857 } 3858 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); 3859 3860 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) 3861 { 3862 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; 3863 u32 max_qp_num = qm->max_qp_num; 3864 u32 q_base = qm->qp_num; 3865 int ret; 3866 3867 if (!num_vfs) 3868 return -EINVAL; 3869 3870 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; 3871 3872 /* If vfs_q_num is less than num_vfs, return error. */ 3873 if (vfs_q_num < num_vfs) 3874 return -EINVAL; 3875 3876 q_num = vfs_q_num / num_vfs; 3877 remain_q_num = vfs_q_num % num_vfs; 3878 3879 for (i = num_vfs; i > 0; i--) { 3880 /* 3881 * if q_num + remain_q_num > max_qp_num in last vf, divide the 3882 * remaining queues equally. 3883 */ 3884 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { 3885 act_q_num = q_num + remain_q_num; 3886 remain_q_num = 0; 3887 } else if (remain_q_num > 0) { 3888 act_q_num = q_num + 1; 3889 remain_q_num--; 3890 } else { 3891 act_q_num = q_num; 3892 } 3893 3894 act_q_num = min_t(int, act_q_num, max_qp_num); 3895 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); 3896 if (ret) { 3897 for (j = num_vfs; j > i; j--) 3898 hisi_qm_set_vft(qm, j, 0, 0); 3899 return ret; 3900 } 3901 q_base += act_q_num; 3902 } 3903 3904 return 0; 3905 } 3906 3907 static int qm_clear_vft_config(struct hisi_qm *qm) 3908 { 3909 int ret; 3910 u32 i; 3911 3912 for (i = 1; i <= qm->vfs_num; i++) { 3913 ret = hisi_qm_set_vft(qm, i, 0, 0); 3914 if (ret) 3915 return ret; 3916 } 3917 qm->vfs_num = 0; 3918 3919 return 0; 3920 } 3921 3922 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) 3923 { 3924 struct device *dev = &qm->pdev->dev; 3925 u32 ir = qos * QM_QOS_RATE; 3926 int ret, total_vfs, i; 3927 3928 total_vfs = pci_sriov_get_totalvfs(qm->pdev); 3929 if (fun_index > total_vfs) 3930 return -EINVAL; 3931 3932 qm->factor[fun_index].func_qos = qos; 3933 3934 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); 3935 if (ret) { 3936 dev_err(dev, "failed to calculate shaper parameter!\n"); 3937 return -EINVAL; 3938 } 3939 3940 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 3941 /* The base number of queue reuse for different alg type */ 3942 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); 3943 if (ret) { 3944 dev_err(dev, "type: %d, failed to set shaper vft!\n", i); 3945 return -EINVAL; 3946 } 3947 } 3948 3949 return 0; 3950 } 3951 3952 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) 3953 { 3954 u64 cir_u = 0, cir_b = 0, cir_s = 0; 3955 u64 shaper_vft, ir_calc, ir; 3956 unsigned int val; 3957 u32 error_rate; 3958 int ret; 3959 3960 ret 
= readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3961 val & BIT(0), POLL_PERIOD, 3962 POLL_TIMEOUT); 3963 if (ret) 3964 return 0; 3965 3966 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); 3967 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); 3968 writel(fun_index, qm->io_base + QM_VFT_CFG); 3969 3970 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 3971 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 3972 3973 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3974 val & BIT(0), POLL_PERIOD, 3975 POLL_TIMEOUT); 3976 if (ret) 3977 return 0; 3978 3979 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | 3980 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); 3981 3982 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; 3983 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; 3984 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; 3985 3986 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; 3987 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; 3988 3989 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 3990 3991 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; 3992 3993 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 3994 if (error_rate > QM_QOS_MIN_ERROR_RATE) { 3995 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); 3996 return 0; 3997 } 3998 3999 return ir; 4000 } 4001 4002 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) 4003 { 4004 struct device *dev = &qm->pdev->dev; 4005 u64 mb_cmd; 4006 u32 qos; 4007 int ret; 4008 4009 qos = qm_get_shaper_vft_qos(qm, fun_num); 4010 if (!qos) { 4011 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); 4012 return; 4013 } 4014 4015 mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; 4016 ret = qm_ping_single_vf(qm, mb_cmd, fun_num); 4017 if (ret) 4018 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); 4019 } 4020 4021 static int qm_vf_read_qos(struct hisi_qm *qm) 4022 { 4023 int cnt = 0; 4024 int ret; 4025 4026 /* reset mailbox qos val */ 4027 qm->mb_qos = 0; 4028 4029 /* vf ping pf to get function qos */ 4030 if (qm->ops->ping_pf) { 4031 ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS); 4032 if (ret) { 4033 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 4034 return ret; 4035 } 4036 } 4037 4038 while (true) { 4039 msleep(QM_WAIT_DST_ACK); 4040 if (qm->mb_qos) 4041 break; 4042 4043 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 4044 pci_err(qm->pdev, "PF ping VF timeout!\n"); 4045 return -ETIMEDOUT; 4046 } 4047 } 4048 4049 return ret; 4050 } 4051 4052 static ssize_t qm_algqos_read(struct file *filp, char __user *buf, 4053 size_t count, loff_t *pos) 4054 { 4055 struct hisi_qm *qm = filp->private_data; 4056 char tbuf[QM_DBG_READ_LEN]; 4057 u32 qos_val, ir; 4058 int ret; 4059 4060 /* Mailbox and reset cannot be operated at the same time */ 4061 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 4062 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); 4063 return -EAGAIN; 4064 } 4065 4066 if (qm->fun_type == QM_HW_PF) { 4067 ir = qm_get_shaper_vft_qos(qm, 0); 4068 } else { 4069 ret = qm_vf_read_qos(qm); 4070 if (ret) 4071 goto err_get_status; 4072 ir = qm->mb_qos; 4073 } 4074 4075 qos_val = ir / QM_QOS_RATE; 4076 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); 4077 4078 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); 4079 4080 err_get_status: 4081 clear_bit(QM_RESETTING, &qm->misc_ctl); 4082 return ret; 4083 } 4084 4085 static ssize_t qm_qos_value_init(const char *buf, unsigned long *val) 4086 { 4087 int buflen = strlen(buf); 4088 int ret, i; 4089 4090 
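	/* Accept plain decimal digits only; any other character is rejected. */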
for (i = 0; i < buflen; i++) { 4091 if (!isdigit(buf[i])) 4092 return -EINVAL; 4093 } 4094 4095 ret = sscanf(buf, "%ld", val); 4096 if (ret != QM_QOS_VAL_NUM) 4097 return -EINVAL; 4098 4099 return 0; 4100 } 4101 4102 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, 4103 size_t count, loff_t *pos) 4104 { 4105 struct hisi_qm *qm = filp->private_data; 4106 char tbuf[QM_DBG_READ_LEN]; 4107 int tmp1, bus, device, function; 4108 char tbuf_bdf[QM_DBG_READ_LEN] = {0}; 4109 char val_buf[QM_QOS_VAL_MAX_LEN] = {0}; 4110 unsigned int fun_index; 4111 unsigned long val = 0; 4112 int len, ret; 4113 4114 if (qm->fun_type == QM_HW_VF) 4115 return -EINVAL; 4116 4117 /* Mailbox and reset cannot be operated at the same time */ 4118 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 4119 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); 4120 return -EAGAIN; 4121 } 4122 4123 if (*pos != 0) { 4124 ret = 0; 4125 goto err_get_status; 4126 } 4127 4128 if (count >= QM_DBG_READ_LEN) { 4129 ret = -ENOSPC; 4130 goto err_get_status; 4131 } 4132 4133 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); 4134 if (len < 0) { 4135 ret = len; 4136 goto err_get_status; 4137 } 4138 4139 tbuf[len] = '\0'; 4140 ret = sscanf(tbuf, "%s %s", tbuf_bdf, val_buf); 4141 if (ret != QM_QOS_PARAM_NUM) { 4142 ret = -EINVAL; 4143 goto err_get_status; 4144 } 4145 4146 ret = qm_qos_value_init(val_buf, &val); 4147 if (val == 0 || val > QM_QOS_MAX_VAL || ret) { 4148 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); 4149 ret = -EINVAL; 4150 goto err_get_status; 4151 } 4152 4153 ret = sscanf(tbuf_bdf, "%d:%x:%d.%d", &tmp1, &bus, &device, &function); 4154 if (ret != QM_QOS_BDF_PARAM_NUM) { 4155 pci_err(qm->pdev, "input pci bdf value is error!\n"); 4156 ret = -EINVAL; 4157 goto err_get_status; 4158 } 4159 4160 fun_index = device * 8 + function; 4161 4162 ret = qm_func_shaper_enable(qm, fun_index, val); 4163 if (ret) { 4164 pci_err(qm->pdev, "failed to enable function shaper!\n"); 4165 ret = -EINVAL; 4166 goto err_get_status; 4167 } 4168 4169 ret = count; 4170 4171 err_get_status: 4172 clear_bit(QM_RESETTING, &qm->misc_ctl); 4173 return ret; 4174 } 4175 4176 static const struct file_operations qm_algqos_fops = { 4177 .owner = THIS_MODULE, 4178 .open = simple_open, 4179 .read = qm_algqos_read, 4180 .write = qm_algqos_write, 4181 }; 4182 4183 /** 4184 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. 4185 * @qm: The qm for which we want to add debugfs files. 4186 * 4187 * Create function qos debugfs files. 4188 */ 4189 static void hisi_qm_set_algqos_init(struct hisi_qm *qm) 4190 { 4191 if (qm->fun_type == QM_HW_PF) 4192 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, 4193 qm, &qm_algqos_fops); 4194 else 4195 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, 4196 qm, &qm_algqos_fops); 4197 } 4198 4199 /** 4200 * hisi_qm_debug_init() - Initialize qm related debugfs files. 4201 * @qm: The qm for which we want to add debugfs files. 4202 * 4203 * Create qm related debugfs files. 
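 * Some entries are only created for the PF, and the alg_qos file is only
 * added on QM_HW_V3 and later hardware.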
4204 */ 4205 void hisi_qm_debug_init(struct hisi_qm *qm) 4206 { 4207 struct qm_dfx *dfx = &qm->debug.dfx; 4208 struct dentry *qm_d; 4209 void *data; 4210 int i; 4211 4212 qm_d = debugfs_create_dir("qm", qm->debug.debug_root); 4213 qm->debug.qm_d = qm_d; 4214 4215 /* only show this in PF */ 4216 if (qm->fun_type == QM_HW_PF) { 4217 qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM); 4218 for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) 4219 qm_create_debugfs_file(qm, qm->debug.qm_d, i); 4220 } 4221 4222 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); 4223 4224 debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops); 4225 4226 debugfs_create_file("status", 0444, qm->debug.qm_d, qm, 4227 &qm_status_fops); 4228 for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { 4229 data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); 4230 debugfs_create_file(qm_dfx_files[i].name, 4231 0644, 4232 qm_d, 4233 data, 4234 &qm_atomic64_ops); 4235 } 4236 4237 if (qm->ver >= QM_HW_V3) 4238 hisi_qm_set_algqos_init(qm); 4239 } 4240 EXPORT_SYMBOL_GPL(hisi_qm_debug_init); 4241 4242 /** 4243 * hisi_qm_debug_regs_clear() - clear qm debug related registers. 4244 * @qm: The qm for which we want to clear its debug registers. 4245 */ 4246 void hisi_qm_debug_regs_clear(struct hisi_qm *qm) 4247 { 4248 struct qm_dfx_registers *regs; 4249 int i; 4250 4251 /* clear current_qm */ 4252 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); 4253 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); 4254 4255 /* clear current_q */ 4256 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); 4257 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); 4258 4259 /* 4260 * these registers are reading and clearing, so clear them after 4261 * reading them. 4262 */ 4263 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); 4264 4265 regs = qm_dfx_regs; 4266 for (i = 0; i < CNT_CYC_REGS_NUM; i++) { 4267 readl(qm->io_base + regs->reg_offset); 4268 regs++; 4269 } 4270 4271 /* clear clear_enable */ 4272 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); 4273 } 4274 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); 4275 4276 /** 4277 * hisi_qm_sriov_enable() - enable virtual functions 4278 * @pdev: the PCIe device 4279 * @max_vfs: the number of virtual functions to enable 4280 * 4281 * Returns the number of enabled VFs. If there are VFs enabled already or 4282 * max_vfs is more than the total number of device can be enabled, returns 4283 * failure. 4284 */ 4285 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) 4286 { 4287 struct hisi_qm *qm = pci_get_drvdata(pdev); 4288 int pre_existing_vfs, num_vfs, total_vfs, ret; 4289 4290 total_vfs = pci_sriov_get_totalvfs(pdev); 4291 pre_existing_vfs = pci_num_vf(pdev); 4292 if (pre_existing_vfs) { 4293 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", 4294 pre_existing_vfs); 4295 return 0; 4296 } 4297 4298 num_vfs = min_t(int, max_vfs, total_vfs); 4299 ret = qm_vf_q_assign(qm, num_vfs); 4300 if (ret) { 4301 pci_err(pdev, "Can't assign queues for VF!\n"); 4302 return ret; 4303 } 4304 4305 qm->vfs_num = num_vfs; 4306 4307 ret = pci_enable_sriov(pdev, num_vfs); 4308 if (ret) { 4309 pci_err(pdev, "Can't enable VF!\n"); 4310 qm_clear_vft_config(qm); 4311 return ret; 4312 } 4313 4314 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); 4315 4316 return num_vfs; 4317 } 4318 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); 4319 4320 /** 4321 * hisi_qm_sriov_disable - disable virtual functions 4322 * @pdev: the PCI device. 4323 * @is_frozen: true when all the VFs are frozen. 
 *
 * Return failure if VFs are already assigned or a VF is in use.
 */
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int total_vfs = pci_sriov_get_totalvfs(qm->pdev);

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
		return -EPERM;
	}

	/* While a VF is in use, SR-IOV cannot be disabled. */
	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
		pci_err(pdev, "Task is using its VF!\n");
		return -EBUSY;
	}

	pci_disable_sriov(pdev);
	/* clear vf function shaper configure array */
	memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);

	return qm_clear_vft_config(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);

/**
 * hisi_qm_sriov_configure() - configure the number of VFs
 * @pdev: The PCI device
 * @num_vfs: The number of VFs to enable
 *
 * Enable SR-IOV according to num_vfs, 0 means disable.
 */
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_qm_sriov_disable(pdev, false);
	else
		return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);

static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
	u32 err_sts;

	if (!qm->err_ini->get_dev_hw_err_status) {
		dev_err(&qm->pdev->dev, "Device doesn't support getting hw error status!\n");
		return ACC_ERR_NONE;
	}

	/* get device hardware error status */
	err_sts = qm->err_ini->get_dev_hw_err_status(qm);
	if (err_sts) {
		if (err_sts & qm->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;

		if (qm->err_ini->log_dev_hw_err)
			qm->err_ini->log_dev_hw_err(qm, err_sts);

		/* ce error does not need to be reset */
		if ((err_sts | qm->err_info.dev_ce_mask) ==
		     qm->err_info.dev_ce_mask) {
			if (qm->err_ini->clear_dev_hw_err_status)
				qm->err_ini->clear_dev_hw_err_status(qm,
								     err_sts);

			return ACC_ERR_RECOVERED;
		}

		return ACC_ERR_NEED_RESET;
	}

	return ACC_ERR_RECOVERED;
}

static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
	enum acc_err_result qm_ret, dev_ret;

	/* log qm error */
	qm_ret = qm_hw_error_handle(qm);

	/* log device error */
	dev_ret = qm_dev_err_handle(qm);

	return (qm_ret == ACC_ERR_NEED_RESET ||
		dev_ret == ACC_ERR_NEED_RESET) ?
	       ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}

/**
 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
 * @pdev: The PCI device which needs to report the error.
 * @state: The connectivity between CPU and device.
 *
 * We register this function into the PCIe AER handlers. It will report the
 * device or qm hardware error status when an error occurs.
4423 */ 4424 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, 4425 pci_channel_state_t state) 4426 { 4427 struct hisi_qm *qm = pci_get_drvdata(pdev); 4428 enum acc_err_result ret; 4429 4430 if (pdev->is_virtfn) 4431 return PCI_ERS_RESULT_NONE; 4432 4433 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state); 4434 if (state == pci_channel_io_perm_failure) 4435 return PCI_ERS_RESULT_DISCONNECT; 4436 4437 ret = qm_process_dev_error(qm); 4438 if (ret == ACC_ERR_NEED_RESET) 4439 return PCI_ERS_RESULT_NEED_RESET; 4440 4441 return PCI_ERS_RESULT_RECOVERED; 4442 } 4443 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); 4444 4445 static int qm_check_req_recv(struct hisi_qm *qm) 4446 { 4447 struct pci_dev *pdev = qm->pdev; 4448 int ret; 4449 u32 val; 4450 4451 if (qm->ver >= QM_HW_V3) 4452 return 0; 4453 4454 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); 4455 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 4456 (val == ACC_VENDOR_ID_VALUE), 4457 POLL_PERIOD, POLL_TIMEOUT); 4458 if (ret) { 4459 dev_err(&pdev->dev, "Fails to read QM reg!\n"); 4460 return ret; 4461 } 4462 4463 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); 4464 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 4465 (val == PCI_VENDOR_ID_HUAWEI), 4466 POLL_PERIOD, POLL_TIMEOUT); 4467 if (ret) 4468 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); 4469 4470 return ret; 4471 } 4472 4473 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) 4474 { 4475 struct pci_dev *pdev = qm->pdev; 4476 u16 cmd; 4477 int i; 4478 4479 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 4480 if (set) 4481 cmd |= PCI_COMMAND_MEMORY; 4482 else 4483 cmd &= ~PCI_COMMAND_MEMORY; 4484 4485 pci_write_config_word(pdev, PCI_COMMAND, cmd); 4486 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4487 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 4488 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) 4489 return 0; 4490 4491 udelay(1); 4492 } 4493 4494 return -ETIMEDOUT; 4495 } 4496 4497 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) 4498 { 4499 struct pci_dev *pdev = qm->pdev; 4500 u16 sriov_ctrl; 4501 int pos; 4502 int i; 4503 4504 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4505 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4506 if (set) 4507 sriov_ctrl |= PCI_SRIOV_CTRL_MSE; 4508 else 4509 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; 4510 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); 4511 4512 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4513 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4514 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> 4515 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) 4516 return 0; 4517 4518 udelay(1); 4519 } 4520 4521 return -ETIMEDOUT; 4522 } 4523 4524 static int qm_vf_reset_prepare(struct hisi_qm *qm, 4525 enum qm_stop_reason stop_reason) 4526 { 4527 struct hisi_qm_list *qm_list = qm->qm_list; 4528 struct pci_dev *pdev = qm->pdev; 4529 struct pci_dev *virtfn; 4530 struct hisi_qm *vf_qm; 4531 int ret = 0; 4532 4533 mutex_lock(&qm_list->lock); 4534 list_for_each_entry(vf_qm, &qm_list->list, list) { 4535 virtfn = vf_qm->pdev; 4536 if (virtfn == pdev) 4537 continue; 4538 4539 if (pci_physfn(virtfn) == pdev) { 4540 /* save VFs PCIE BAR configuration */ 4541 pci_save_state(virtfn); 4542 4543 ret = hisi_qm_stop(vf_qm, stop_reason); 4544 if (ret) 4545 goto stop_fail; 4546 } 4547 } 4548 4549 stop_fail: 4550 mutex_unlock(&qm_list->lock); 4551 return ret; 4552 } 4553 4554 static int qm_try_stop_vfs(struct hisi_qm *qm, 
u64 cmd,
			   enum qm_stop_reason stop_reason)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	if (!qm->vfs_num)
		return 0;

	/* Kunpeng930 supports notifying VFs to stop before a PF reset */
	if (qm->ops->ping_all_vfs) {
		ret = qm->ops->ping_all_vfs(qm, cmd);
		if (ret)
			pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
	} else {
		ret = qm_vf_reset_prepare(qm, stop_reason);
		if (ret)
			pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
	}

	return ret;
}

static int qm_wait_reset_finish(struct hisi_qm *qm)
{
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * On Kunpeng920, the PF and VFs on the host do not support
	 * resetting at the same time.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}

static int qm_controller_reset_prepare(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "Controller reset not ready!\n");
		return ret;
	}

	/* The PF obtains the information of the VFs by querying the register. */
	qm_cmd_uninit(qm);

	/* Whether or not the VFs are stopped successfully, the soft reset will continue.
*/ 4632 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); 4633 if (ret) 4634 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); 4635 4636 ret = hisi_qm_stop(qm, QM_SOFT_RESET); 4637 if (ret) { 4638 pci_err(pdev, "Fails to stop QM!\n"); 4639 qm_reset_bit_clear(qm); 4640 return ret; 4641 } 4642 4643 ret = qm_wait_vf_prepare_finish(qm); 4644 if (ret) 4645 pci_err(pdev, "failed to stop by vfs in soft reset!\n"); 4646 4647 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4648 4649 return 0; 4650 } 4651 4652 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) 4653 { 4654 u32 nfe_enb = 0; 4655 4656 /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ 4657 if (qm->ver >= QM_HW_V3) 4658 return; 4659 4660 if (!qm->err_status.is_dev_ecc_mbit && 4661 qm->err_status.is_qm_ecc_mbit && 4662 qm->err_ini->close_axi_master_ooo) { 4663 4664 qm->err_ini->close_axi_master_ooo(qm); 4665 4666 } else if (qm->err_status.is_dev_ecc_mbit && 4667 !qm->err_status.is_qm_ecc_mbit && 4668 !qm->err_ini->close_axi_master_ooo) { 4669 4670 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); 4671 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, 4672 qm->io_base + QM_RAS_NFE_ENABLE); 4673 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); 4674 } 4675 } 4676 4677 static int qm_soft_reset(struct hisi_qm *qm) 4678 { 4679 struct pci_dev *pdev = qm->pdev; 4680 int ret; 4681 u32 val; 4682 4683 /* Ensure all doorbells and mailboxes received by QM */ 4684 ret = qm_check_req_recv(qm); 4685 if (ret) 4686 return ret; 4687 4688 if (qm->vfs_num) { 4689 ret = qm_set_vf_mse(qm, false); 4690 if (ret) { 4691 pci_err(pdev, "Fails to disable vf MSE bit.\n"); 4692 return ret; 4693 } 4694 } 4695 4696 ret = qm->ops->set_msi(qm, false); 4697 if (ret) { 4698 pci_err(pdev, "Fails to disable PEH MSI bit.\n"); 4699 return ret; 4700 } 4701 4702 qm_dev_ecc_mbit_handle(qm); 4703 4704 /* OOO register set and check */ 4705 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, 4706 qm->io_base + ACC_MASTER_GLOBAL_CTRL); 4707 4708 /* If bus lock, reset chip */ 4709 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, 4710 val, 4711 (val == ACC_MASTER_TRANS_RETURN_RW), 4712 POLL_PERIOD, POLL_TIMEOUT); 4713 if (ret) { 4714 pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); 4715 return ret; 4716 } 4717 4718 if (qm->err_ini->close_sva_prefetch) 4719 qm->err_ini->close_sva_prefetch(qm); 4720 4721 ret = qm_set_pf_mse(qm, false); 4722 if (ret) { 4723 pci_err(pdev, "Fails to disable pf MSE bit.\n"); 4724 return ret; 4725 } 4726 4727 /* The reset related sub-control registers are not in PCI BAR */ 4728 if (ACPI_HANDLE(&pdev->dev)) { 4729 unsigned long long value = 0; 4730 acpi_status s; 4731 4732 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), 4733 qm->err_info.acpi_rst, 4734 NULL, &value); 4735 if (ACPI_FAILURE(s)) { 4736 pci_err(pdev, "NO controller reset method!\n"); 4737 return -EIO; 4738 } 4739 4740 if (value) { 4741 pci_err(pdev, "Reset step %llu failed!\n", value); 4742 return -EIO; 4743 } 4744 } else { 4745 pci_err(pdev, "No reset method!\n"); 4746 return -EINVAL; 4747 } 4748 4749 return 0; 4750 } 4751 4752 static int qm_vf_reset_done(struct hisi_qm *qm) 4753 { 4754 struct hisi_qm_list *qm_list = qm->qm_list; 4755 struct pci_dev *pdev = qm->pdev; 4756 struct pci_dev *virtfn; 4757 struct hisi_qm *vf_qm; 4758 int ret = 0; 4759 4760 mutex_lock(&qm_list->lock); 4761 list_for_each_entry(vf_qm, &qm_list->list, list) { 4762 virtfn = vf_qm->pdev; 4763 if (virtfn == pdev) 4764 continue; 4765 4766 if (pci_physfn(virtfn) == pdev) { 4767 /* enable VFs PCIE BAR configuration */ 4768 pci_restore_state(virtfn); 4769 4770 ret = qm_restart(vf_qm); 4771 if (ret) 4772 goto restart_fail; 4773 } 4774 } 4775 4776 restart_fail: 4777 mutex_unlock(&qm_list->lock); 4778 return ret; 4779 } 4780 4781 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) 4782 { 4783 struct pci_dev *pdev = qm->pdev; 4784 int ret; 4785 4786 if (!qm->vfs_num) 4787 return 0; 4788 4789 ret = qm_vf_q_assign(qm, qm->vfs_num); 4790 if (ret) { 4791 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret); 4792 return ret; 4793 } 4794 4795 /* Kunpeng930 supports to notify VFs to start after PF reset. 
*/ 4796 if (qm->ops->ping_all_vfs) { 4797 ret = qm->ops->ping_all_vfs(qm, cmd); 4798 if (ret) 4799 pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); 4800 } else { 4801 ret = qm_vf_reset_done(qm); 4802 if (ret) 4803 pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret); 4804 } 4805 4806 return ret; 4807 } 4808 4809 static int qm_dev_hw_init(struct hisi_qm *qm) 4810 { 4811 return qm->err_ini->hw_init(qm); 4812 } 4813 4814 static void qm_restart_prepare(struct hisi_qm *qm) 4815 { 4816 u32 value; 4817 4818 if (qm->err_ini->open_sva_prefetch) 4819 qm->err_ini->open_sva_prefetch(qm); 4820 4821 if (qm->ver >= QM_HW_V3) 4822 return; 4823 4824 if (!qm->err_status.is_qm_ecc_mbit && 4825 !qm->err_status.is_dev_ecc_mbit) 4826 return; 4827 4828 /* temporarily close the OOO port used for PEH to write out MSI */ 4829 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4830 writel(value & ~qm->err_info.msi_wr_port, 4831 qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4832 4833 /* clear dev ecc 2bit error source if having */ 4834 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; 4835 if (value && qm->err_ini->clear_dev_hw_err_status) 4836 qm->err_ini->clear_dev_hw_err_status(qm, value); 4837 4838 /* clear QM ecc mbit error source */ 4839 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); 4840 4841 /* clear AM Reorder Buffer ecc mbit source */ 4842 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); 4843 } 4844 4845 static void qm_restart_done(struct hisi_qm *qm) 4846 { 4847 u32 value; 4848 4849 if (qm->ver >= QM_HW_V3) 4850 goto clear_flags; 4851 4852 if (!qm->err_status.is_qm_ecc_mbit && 4853 !qm->err_status.is_dev_ecc_mbit) 4854 return; 4855 4856 /* open the OOO port for PEH to write out MSI */ 4857 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4858 value |= qm->err_info.msi_wr_port; 4859 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4860 4861 clear_flags: 4862 qm->err_status.is_qm_ecc_mbit = false; 4863 qm->err_status.is_dev_ecc_mbit = false; 4864 } 4865 4866 static int qm_controller_reset_done(struct hisi_qm *qm) 4867 { 4868 struct pci_dev *pdev = qm->pdev; 4869 int ret; 4870 4871 ret = qm->ops->set_msi(qm, true); 4872 if (ret) { 4873 pci_err(pdev, "Fails to enable PEH MSI bit!\n"); 4874 return ret; 4875 } 4876 4877 ret = qm_set_pf_mse(qm, true); 4878 if (ret) { 4879 pci_err(pdev, "Fails to enable pf MSE bit!\n"); 4880 return ret; 4881 } 4882 4883 if (qm->vfs_num) { 4884 ret = qm_set_vf_mse(qm, true); 4885 if (ret) { 4886 pci_err(pdev, "Fails to enable vf MSE bit!\n"); 4887 return ret; 4888 } 4889 } 4890 4891 ret = qm_dev_hw_init(qm); 4892 if (ret) { 4893 pci_err(pdev, "Failed to init device\n"); 4894 return ret; 4895 } 4896 4897 qm_restart_prepare(qm); 4898 hisi_qm_dev_err_init(qm); 4899 if (qm->err_ini->open_axi_master_ooo) 4900 qm->err_ini->open_axi_master_ooo(qm); 4901 4902 ret = qm_restart(qm); 4903 if (ret) { 4904 pci_err(pdev, "Failed to start QM!\n"); 4905 return ret; 4906 } 4907 4908 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4909 if (ret) 4910 pci_err(pdev, "failed to start vfs by pf in soft reset.\n"); 4911 4912 ret = qm_wait_vf_prepare_finish(qm); 4913 if (ret) 4914 pci_err(pdev, "failed to start by vfs in soft reset!\n"); 4915 4916 qm_cmd_init(qm); 4917 qm_restart_done(qm); 4918 4919 qm_reset_bit_clear(qm); 4920 4921 return 0; 4922 } 4923 4924 static int qm_controller_reset(struct hisi_qm *qm) 4925 { 4926 struct pci_dev *pdev = qm->pdev; 4927 int ret; 4928 4929 pci_info(pdev, "Controller resetting...\n"); 4930 4931 ret = 
	      qm_controller_reset_prepare(qm);
	if (ret) {
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return ret;
	}

	ret = qm_soft_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		qm_reset_bit_clear(qm);
		return ret;
	}

	ret = qm_controller_reset_done(qm);
	if (ret) {
		qm_reset_bit_clear(qm);
		return ret;
	}

	pci_info(pdev, "Controller reset complete\n");

	return 0;
}

/**
 * hisi_qm_dev_slot_reset() - slot reset
 * @pdev: the PCIe device
 *
 * This function offers the QM-related PCIe device reset interface. Drivers
 * that use the QM can use this function as the slot_reset callback in their
 * struct pci_error_handlers.
 */
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	pci_aer_clear_nonfatal_status(pdev);

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);

void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

	/*
	 * Check whether there is an ECC mbit error. If it occurs, wait for
	 * the soft reset to fix it.
	 */
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	/* The PF obtains the information of the VFs by querying the register.
*/ 5009 if (qm->fun_type == QM_HW_PF) 5010 qm_cmd_uninit(qm); 5011 5012 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR); 5013 if (ret) 5014 pci_err(pdev, "failed to stop vfs by pf in FLR.\n"); 5015 5016 ret = hisi_qm_stop(qm, QM_FLR); 5017 if (ret) { 5018 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); 5019 return; 5020 } 5021 5022 ret = qm_wait_vf_prepare_finish(qm); 5023 if (ret) 5024 pci_err(pdev, "failed to stop by vfs in FLR!\n"); 5025 5026 pci_info(pdev, "FLR resetting...\n"); 5027 } 5028 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); 5029 5030 static bool qm_flr_reset_complete(struct pci_dev *pdev) 5031 { 5032 struct pci_dev *pf_pdev = pci_physfn(pdev); 5033 struct hisi_qm *qm = pci_get_drvdata(pf_pdev); 5034 u32 id; 5035 5036 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); 5037 if (id == QM_PCI_COMMAND_INVALID) { 5038 pci_err(pdev, "Device can not be used!\n"); 5039 return false; 5040 } 5041 5042 return true; 5043 } 5044 5045 void hisi_qm_reset_done(struct pci_dev *pdev) 5046 { 5047 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 5048 struct hisi_qm *qm = pci_get_drvdata(pdev); 5049 int ret; 5050 5051 if (qm->fun_type == QM_HW_PF) { 5052 ret = qm_dev_hw_init(qm); 5053 if (ret) { 5054 pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); 5055 goto flr_done; 5056 } 5057 } 5058 5059 hisi_qm_dev_err_init(pf_qm); 5060 5061 ret = qm_restart(qm); 5062 if (ret) { 5063 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); 5064 goto flr_done; 5065 } 5066 5067 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 5068 if (ret) 5069 pci_err(pdev, "failed to start vfs by pf in FLR.\n"); 5070 5071 ret = qm_wait_vf_prepare_finish(qm); 5072 if (ret) 5073 pci_err(pdev, "failed to start by vfs in FLR!\n"); 5074 5075 flr_done: 5076 if (qm->fun_type == QM_HW_PF) 5077 qm_cmd_init(qm); 5078 5079 if (qm_flr_reset_complete(pdev)) 5080 pci_info(pdev, "FLR reset complete\n"); 5081 5082 qm_reset_bit_clear(qm); 5083 } 5084 EXPORT_SYMBOL_GPL(hisi_qm_reset_done); 5085 5086 static irqreturn_t qm_abnormal_irq(int irq, void *data) 5087 { 5088 struct hisi_qm *qm = data; 5089 enum acc_err_result ret; 5090 5091 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); 5092 ret = qm_process_dev_error(qm); 5093 if (ret == ACC_ERR_NEED_RESET && 5094 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && 5095 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) 5096 schedule_work(&qm->rst_work); 5097 5098 return IRQ_HANDLED; 5099 } 5100 5101 static int qm_irq_register(struct hisi_qm *qm) 5102 { 5103 struct pci_dev *pdev = qm->pdev; 5104 int ret; 5105 5106 ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), 5107 qm_irq, 0, qm->dev_name, qm); 5108 if (ret) 5109 return ret; 5110 5111 if (qm->ver > QM_HW_V1) { 5112 ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), 5113 qm_aeq_irq, 0, qm->dev_name, qm); 5114 if (ret) 5115 goto err_aeq_irq; 5116 5117 if (qm->fun_type == QM_HW_PF) { 5118 ret = request_irq(pci_irq_vector(pdev, 5119 QM_ABNORMAL_EVENT_IRQ_VECTOR), 5120 qm_abnormal_irq, 0, qm->dev_name, qm); 5121 if (ret) 5122 goto err_abonormal_irq; 5123 } 5124 } 5125 5126 if (qm->ver > QM_HW_V2) { 5127 ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), 5128 qm_mb_cmd_irq, 0, qm->dev_name, qm); 5129 if (ret) 5130 goto err_mb_cmd_irq; 5131 } 5132 5133 return 0; 5134 5135 err_mb_cmd_irq: 5136 if (qm->fun_type == QM_HW_PF) 5137 free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); 5138 err_abonormal_irq: 5139 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); 5140 err_aeq_irq: 5141 
free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); 5142 return ret; 5143 } 5144 5145 /** 5146 * hisi_qm_dev_shutdown() - Shutdown device. 5147 * @pdev: The device will be shutdown. 5148 * 5149 * This function will stop qm when OS shutdown or rebooting. 5150 */ 5151 void hisi_qm_dev_shutdown(struct pci_dev *pdev) 5152 { 5153 struct hisi_qm *qm = pci_get_drvdata(pdev); 5154 int ret; 5155 5156 ret = hisi_qm_stop(qm, QM_NORMAL); 5157 if (ret) 5158 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); 5159 } 5160 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); 5161 5162 static void hisi_qm_controller_reset(struct work_struct *rst_work) 5163 { 5164 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); 5165 int ret; 5166 5167 /* reset pcie device controller */ 5168 ret = qm_controller_reset(qm); 5169 if (ret) 5170 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); 5171 5172 } 5173 5174 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, 5175 enum qm_stop_reason stop_reason) 5176 { 5177 enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE; 5178 struct pci_dev *pdev = qm->pdev; 5179 int ret; 5180 5181 ret = qm_reset_prepare_ready(qm); 5182 if (ret) { 5183 dev_err(&pdev->dev, "reset prepare not ready!\n"); 5184 atomic_set(&qm->status.flags, QM_STOP); 5185 cmd = QM_VF_PREPARE_FAIL; 5186 goto err_prepare; 5187 } 5188 5189 ret = hisi_qm_stop(qm, stop_reason); 5190 if (ret) { 5191 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); 5192 atomic_set(&qm->status.flags, QM_STOP); 5193 cmd = QM_VF_PREPARE_FAIL; 5194 goto err_prepare; 5195 } 5196 5197 err_prepare: 5198 pci_save_state(pdev); 5199 ret = qm->ops->ping_pf(qm, cmd); 5200 if (ret) 5201 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); 5202 } 5203 5204 static void qm_pf_reset_vf_done(struct hisi_qm *qm) 5205 { 5206 enum qm_mb_cmd cmd = QM_VF_START_DONE; 5207 struct pci_dev *pdev = qm->pdev; 5208 int ret; 5209 5210 pci_restore_state(pdev); 5211 ret = hisi_qm_start(qm); 5212 if (ret) { 5213 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); 5214 cmd = QM_VF_START_FAIL; 5215 } 5216 5217 ret = qm->ops->ping_pf(qm, cmd); 5218 if (ret) 5219 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); 5220 5221 qm_reset_bit_clear(qm); 5222 } 5223 5224 static int qm_wait_pf_reset_finish(struct hisi_qm *qm) 5225 { 5226 struct device *dev = &qm->pdev->dev; 5227 u32 val, cmd; 5228 u64 msg; 5229 int ret; 5230 5231 /* Wait for reset to finish */ 5232 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, 5233 val == BIT(0), QM_VF_RESET_WAIT_US, 5234 QM_VF_RESET_WAIT_TIMEOUT_US); 5235 /* hardware completion status should be available by this time */ 5236 if (ret) { 5237 dev_err(dev, "couldn't get reset done status from PF, timeout!\n"); 5238 return -ETIMEDOUT; 5239 } 5240 5241 /* 5242 * Whether message is got successfully, 5243 * VF needs to ack PF by clearing the interrupt. 
5244 */ 5245 ret = qm_get_mb_cmd(qm, &msg, 0); 5246 qm_clear_cmd_interrupt(qm, 0); 5247 if (ret) { 5248 dev_err(dev, "failed to get msg from PF in reset done!\n"); 5249 return ret; 5250 } 5251 5252 cmd = msg & QM_MB_CMD_DATA_MASK; 5253 if (cmd != QM_PF_RESET_DONE) { 5254 dev_err(dev, "the cmd(%u) is not reset done!\n", cmd); 5255 ret = -EINVAL; 5256 } 5257 5258 return ret; 5259 } 5260 5261 static void qm_pf_reset_vf_process(struct hisi_qm *qm, 5262 enum qm_stop_reason stop_reason) 5263 { 5264 struct device *dev = &qm->pdev->dev; 5265 int ret; 5266 5267 dev_info(dev, "device reset start...\n"); 5268 5269 /* The message is obtained by querying the register during resetting */ 5270 qm_cmd_uninit(qm); 5271 qm_pf_reset_vf_prepare(qm, stop_reason); 5272 5273 ret = qm_wait_pf_reset_finish(qm); 5274 if (ret) 5275 goto err_get_status; 5276 5277 qm_pf_reset_vf_done(qm); 5278 qm_cmd_init(qm); 5279 5280 dev_info(dev, "device reset done.\n"); 5281 5282 return; 5283 5284 err_get_status: 5285 qm_cmd_init(qm); 5286 qm_reset_bit_clear(qm); 5287 } 5288 5289 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) 5290 { 5291 struct device *dev = &qm->pdev->dev; 5292 u64 msg; 5293 u32 cmd; 5294 int ret; 5295 5296 /* 5297 * Get the msg from source by sending mailbox. Whether message is got 5298 * successfully, destination needs to ack source by clearing the interrupt. 5299 */ 5300 ret = qm_get_mb_cmd(qm, &msg, fun_num); 5301 qm_clear_cmd_interrupt(qm, BIT(fun_num)); 5302 if (ret) { 5303 dev_err(dev, "failed to get msg from source!\n"); 5304 return; 5305 } 5306 5307 cmd = msg & QM_MB_CMD_DATA_MASK; 5308 switch (cmd) { 5309 case QM_PF_FLR_PREPARE: 5310 qm_pf_reset_vf_process(qm, QM_FLR); 5311 break; 5312 case QM_PF_SRST_PREPARE: 5313 qm_pf_reset_vf_process(qm, QM_SOFT_RESET); 5314 break; 5315 case QM_VF_GET_QOS: 5316 qm_vf_get_qos(qm, fun_num); 5317 break; 5318 case QM_PF_SET_QOS: 5319 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; 5320 break; 5321 default: 5322 dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num); 5323 break; 5324 } 5325 } 5326 5327 static void qm_cmd_process(struct work_struct *cmd_process) 5328 { 5329 struct hisi_qm *qm = container_of(cmd_process, 5330 struct hisi_qm, cmd_process); 5331 u32 vfs_num = qm->vfs_num; 5332 u64 val; 5333 u32 i; 5334 5335 if (qm->fun_type == QM_HW_PF) { 5336 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); 5337 if (!val) 5338 return; 5339 5340 for (i = 1; i <= vfs_num; i++) { 5341 if (val & BIT(i)) 5342 qm_handle_cmd_msg(qm, i); 5343 } 5344 5345 return; 5346 } 5347 5348 qm_handle_cmd_msg(qm, 0); 5349 } 5350 5351 /** 5352 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list. 5353 * @qm: The qm needs add. 5354 * @qm_list: The qm list. 5355 * 5356 * This function adds qm to qm list, and will register algorithm to 5357 * crypto when the qm list is empty. 
5358 */ 5359 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 5360 { 5361 struct device *dev = &qm->pdev->dev; 5362 int flag = 0; 5363 int ret = 0; 5364 5365 mutex_lock(&qm_list->lock); 5366 if (list_empty(&qm_list->list)) 5367 flag = 1; 5368 list_add_tail(&qm->list, &qm_list->list); 5369 mutex_unlock(&qm_list->lock); 5370 5371 if (qm->ver <= QM_HW_V2 && qm->use_sva) { 5372 dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n"); 5373 return 0; 5374 } 5375 5376 if (flag) { 5377 ret = qm_list->register_to_crypto(qm); 5378 if (ret) { 5379 mutex_lock(&qm_list->lock); 5380 list_del(&qm->list); 5381 mutex_unlock(&qm_list->lock); 5382 } 5383 } 5384 5385 return ret; 5386 } 5387 EXPORT_SYMBOL_GPL(hisi_qm_alg_register); 5388 5389 /** 5390 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from 5391 * qm list. 5392 * @qm: The qm needs delete. 5393 * @qm_list: The qm list. 5394 * 5395 * This function deletes qm from qm list, and will unregister algorithm 5396 * from crypto when the qm list is empty. 5397 */ 5398 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 5399 { 5400 mutex_lock(&qm_list->lock); 5401 list_del(&qm->list); 5402 mutex_unlock(&qm_list->lock); 5403 5404 if (qm->ver <= QM_HW_V2 && qm->use_sva) 5405 return; 5406 5407 if (list_empty(&qm_list->list)) 5408 qm_list->unregister_from_crypto(qm); 5409 } 5410 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); 5411 5412 static int qm_get_qp_num(struct hisi_qm *qm) 5413 { 5414 if (qm->ver == QM_HW_V1) 5415 qm->ctrl_qp_num = QM_QNUM_V1; 5416 else if (qm->ver == QM_HW_V2) 5417 qm->ctrl_qp_num = QM_QNUM_V2; 5418 else 5419 qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) & 5420 QM_QP_NUN_MASK; 5421 5422 if (qm->use_db_isolation) 5423 qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >> 5424 QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK; 5425 else 5426 qm->max_qp_num = qm->ctrl_qp_num; 5427 5428 /* check if qp number is valid */ 5429 if (qm->qp_num > qm->max_qp_num) { 5430 dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n", 5431 qm->qp_num, qm->max_qp_num); 5432 return -EINVAL; 5433 } 5434 5435 return 0; 5436 } 5437 5438 static int qm_get_pci_res(struct hisi_qm *qm) 5439 { 5440 struct pci_dev *pdev = qm->pdev; 5441 struct device *dev = &pdev->dev; 5442 int ret; 5443 5444 ret = pci_request_mem_regions(pdev, qm->dev_name); 5445 if (ret < 0) { 5446 dev_err(dev, "Failed to request mem regions!\n"); 5447 return ret; 5448 } 5449 5450 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); 5451 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); 5452 if (!qm->io_base) { 5453 ret = -EIO; 5454 goto err_request_mem_regions; 5455 } 5456 5457 if (qm->ver > QM_HW_V2) { 5458 if (qm->fun_type == QM_HW_PF) 5459 qm->use_db_isolation = readl(qm->io_base + 5460 QM_QUE_ISO_EN) & BIT(0); 5461 else 5462 qm->use_db_isolation = readl(qm->io_base + 5463 QM_QUE_ISO_CFG_V) & BIT(0); 5464 } 5465 5466 if (qm->use_db_isolation) { 5467 qm->db_interval = QM_QP_DB_INTERVAL; 5468 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); 5469 qm->db_io_base = ioremap(qm->db_phys_base, 5470 pci_resource_len(pdev, PCI_BAR_4)); 5471 if (!qm->db_io_base) { 5472 ret = -EIO; 5473 goto err_ioremap; 5474 } 5475 } else { 5476 qm->db_phys_base = qm->phys_base; 5477 qm->db_io_base = qm->io_base; 5478 qm->db_interval = 0; 5479 } 5480 5481 if (qm->fun_type == QM_HW_PF) { 5482 ret = qm_get_qp_num(qm); 5483 if (ret) 5484 goto err_db_ioremap; 5485 } 5486 5487 return 0; 5488 5489 
err_db_ioremap: 5490 if (qm->use_db_isolation) 5491 iounmap(qm->db_io_base); 5492 err_ioremap: 5493 iounmap(qm->io_base); 5494 err_request_mem_regions: 5495 pci_release_mem_regions(pdev); 5496 return ret; 5497 } 5498 5499 static int hisi_qm_pci_init(struct hisi_qm *qm) 5500 { 5501 struct pci_dev *pdev = qm->pdev; 5502 struct device *dev = &pdev->dev; 5503 unsigned int num_vec; 5504 int ret; 5505 5506 ret = pci_enable_device_mem(pdev); 5507 if (ret < 0) { 5508 dev_err(dev, "Failed to enable device mem!\n"); 5509 return ret; 5510 } 5511 5512 ret = qm_get_pci_res(qm); 5513 if (ret) 5514 goto err_disable_pcidev; 5515 5516 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 5517 if (ret < 0) 5518 goto err_get_pci_res; 5519 pci_set_master(pdev); 5520 5521 if (!qm->ops->get_irq_num) { 5522 ret = -EOPNOTSUPP; 5523 goto err_get_pci_res; 5524 } 5525 num_vec = qm->ops->get_irq_num(qm); 5526 ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); 5527 if (ret < 0) { 5528 dev_err(dev, "Failed to enable MSI vectors!\n"); 5529 goto err_get_pci_res; 5530 } 5531 5532 return 0; 5533 5534 err_get_pci_res: 5535 qm_put_pci_res(qm); 5536 err_disable_pcidev: 5537 pci_disable_device(pdev); 5538 return ret; 5539 } 5540 5541 static void hisi_qm_init_work(struct hisi_qm *qm) 5542 { 5543 INIT_WORK(&qm->work, qm_work_process); 5544 if (qm->fun_type == QM_HW_PF) 5545 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); 5546 5547 if (qm->ver > QM_HW_V2) 5548 INIT_WORK(&qm->cmd_process, qm_cmd_process); 5549 } 5550 5551 static int hisi_qp_alloc_memory(struct hisi_qm *qm) 5552 { 5553 struct device *dev = &qm->pdev->dev; 5554 size_t qp_dma_size; 5555 int i, ret; 5556 5557 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); 5558 if (!qm->qp_array) 5559 return -ENOMEM; 5560 5561 /* one more page for device or qp statuses */ 5562 qp_dma_size = qm->sqe_size * QM_Q_DEPTH + 5563 sizeof(struct qm_cqe) * QM_Q_DEPTH; 5564 qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; 5565 for (i = 0; i < qm->qp_num; i++) { 5566 ret = hisi_qp_memory_init(qm, qp_dma_size, i); 5567 if (ret) 5568 goto err_init_qp_mem; 5569 5570 dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); 5571 } 5572 5573 return 0; 5574 err_init_qp_mem: 5575 hisi_qp_memory_uninit(qm, i); 5576 5577 return ret; 5578 } 5579 5580 static int hisi_qm_memory_init(struct hisi_qm *qm) 5581 { 5582 struct device *dev = &qm->pdev->dev; 5583 int ret, total_vfs; 5584 size_t off = 0; 5585 5586 total_vfs = pci_sriov_get_totalvfs(qm->pdev); 5587 qm->factor = kcalloc(total_vfs + 1, sizeof(struct qm_shaper_factor), GFP_KERNEL); 5588 if (!qm->factor) 5589 return -ENOMEM; 5590 5591 #define QM_INIT_BUF(qm, type, num) do { \ 5592 (qm)->type = ((qm)->qdma.va + (off)); \ 5593 (qm)->type##_dma = (qm)->qdma.dma + (off); \ 5594 off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ 5595 } while (0) 5596 5597 idr_init(&qm->qp_idr); 5598 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) + 5599 QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + 5600 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + 5601 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); 5602 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, 5603 GFP_ATOMIC); 5604 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); 5605 if (!qm->qdma.va) { 5606 ret = -ENOMEM; 5607 goto err_alloc_qdma; 5608 } 5609 5610 QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); 5611 QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); 5612 QM_INIT_BUF(qm, sqc, qm->qp_num); 5613 QM_INIT_BUF(qm, cqc, qm->qp_num); 5614 5615 
	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_alloc_qp_array;

	return 0;

err_alloc_qp_array:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_alloc_qdma:
	kfree(qm->factor);

	return ret;
}

/**
 * hisi_qm_init() - Initialize the configuration of a qm.
 * @qm: The qm to be initialized.
 *
 * This function initializes the qm. Once it succeeds, hisi_qm_start() can be
 * called to put the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irq_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
		/* v2 and later support getting the vft by mailbox */
		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret)
			goto err_irq_register;
	}

	ret = qm_alloc_uacce(qm);
	if (ret < 0)
		dev_warn(dev, "failed to alloc uacce (%d)\n", ret);

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	hisi_qm_init_work(qm);
	qm_cmd_init(qm);
	atomic_set(&qm->status.flags, QM_INIT);

	return 0;

err_alloc_uacce:
	uacce_remove(qm->uacce);
	qm->uacce = NULL;
err_irq_register:
	qm_irq_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
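
/*
 * Illustrative sketch, not part of this file: an accelerator driver built on
 * top of this QM library typically wires the exported callbacks above straight
 * into its struct pci_driver and struct pci_error_handlers. The "my_acc_*"
 * names below are hypothetical placeholders for driver-side symbols; only the
 * hisi_qm_* helpers come from this file.
 *
 *	static const struct pci_error_handlers my_acc_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 *
 *	static struct pci_driver my_acc_pci_driver = {
 *		.name			= "my_acc",
 *		.id_table		= my_acc_dev_ids,
 *		.probe			= my_acc_probe,
 *		.remove			= my_acc_remove,
 *		.sriov_configure	= hisi_qm_sriov_configure,
 *		.err_handler		= &my_acc_err_handler,
 *		.shutdown		= hisi_qm_dev_shutdown,
 *	};
 *
 * With this wiring, AER events funnel into qm_process_dev_error() via
 * hisi_qm_dev_err_detected(), and writes to sriov_numvfs end up in
 * hisi_qm_sriov_enable()/hisi_qm_sriov_disable() through
 * hisi_qm_sriov_configure().
 */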
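
/*
 * Illustrative sketch, not part of this file: the typical PF probe-time
 * ordering for a driver using this library, with error unwinding abbreviated.
 * my_acc_probe(), my_acc_set_qm_fields(), my_acc_hw_err_init() and
 * my_acc_devices are hypothetical driver-side names; the hisi_qm_* calls are
 * interfaces defined or referenced in this file.
 *
 *	static int my_acc_probe(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		struct hisi_qm *qm;
 *		int ret;
 *
 *		qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
 *		if (!qm)
 *			return -ENOMEM;
 *
 *		my_acc_set_qm_fields(qm, pdev);	// pdev, sqe_size, ops, dev_name, ...
 *		ret = hisi_qm_init(qm);		// PCI/IRQ/memory setup above
 *		if (ret)
 *			return ret;
 *
 *		my_acc_hw_err_init(qm);		// ends up in hisi_qm_dev_err_init()
 *		ret = hisi_qm_start(qm);	// put the qm to work
 *		if (ret)
 *			goto err_qm_uninit;
 *
 *		hisi_qm_debug_init(qm);		// debugfs files, including alg_qos
 *		ret = hisi_qm_alg_register(qm, &my_acc_devices);
 *		if (ret)
 *			goto err_qm_stop;
 *
 *		return 0;
 *
 *	err_qm_stop:
 *		hisi_qm_stop(qm, QM_NORMAL);
 *	err_qm_uninit:
 *		// undo hisi_qm_init() here (uacce/memory/IRQ/PCI teardown)
 *		return ret;
 *	}
 *
 * Removal runs in the opposite order: hisi_qm_alg_unregister(), disabling
 * SR-IOV if it was enabled, then stopping and uninitializing the qm.
 */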
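
/*
 * Usage note for the alg_qos debugfs file created by hisi_qm_set_algqos_init()
 * above: qm_algqos_write() expects "<bdf> <qos value>", where the value is a
 * decimal number in 1~1000. The path below is only an example; the actual
 * debugfs directory is whatever the accelerator driver passes in as
 * qm->debug.debug_root.
 *
 *	echo "0000:81:00.1 500" > /sys/kernel/debug/<driver>/<bdf>/alg_qos
 */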