// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>
#include "qm_common.h"

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE		0x0
#define QM_VF_AEQ_INT_MASK		0x4
#define QM_VF_EQ_INT_SOURCE		0x8
#define QM_VF_EQ_INT_MASK		0xc

#define QM_IRQ_VECTOR_MASK		GENMASK(15, 0)
#define QM_IRQ_TYPE_MASK		GENMASK(15, 0)
#define QM_IRQ_TYPE_SHIFT		16
#define QM_ABN_IRQ_TYPE_MASK		GENMASK(7, 0)

/* mailbox */
#define QM_MB_PING_ALL_VFS		0xffff
#define QM_MB_CMD_DATA_SHIFT		32
#define QM_MB_CMD_DATA_MASK		GENMASK(31, 0)
#define QM_MB_STATUS_MASK		GENMASK(12, 9)

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT		0
#define QM_SQ_PAGE_SIZE_SHIFT		4
#define QM_SQ_BUF_SIZE_SHIFT		8
#define QM_SQ_SQE_SIZE_SHIFT		12
#define QM_SQ_PRIORITY_SHIFT		0
#define QM_SQ_ORDERS_SHIFT		4
#define QM_SQ_TYPE_SHIFT		8
#define QM_QC_PASID_ENABLE		0x1
#define QM_QC_PASID_ENABLE_SHIFT	7

#define QM_SQ_TYPE_MASK			GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc)->w11) >> 6) & 0x1)

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT		0
#define QM_CQ_PAGE_SIZE_SHIFT		4
#define QM_CQ_BUF_SIZE_SHIFT		8
#define QM_CQ_CQE_SIZE_SHIFT		12
#define QM_CQ_PHASE_SHIFT		0
#define QM_CQ_FLAG_SHIFT		1

#define QM_CQE_PHASE(cqe)		(le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE			4
#define QM_CQ_TAIL_IDX(cqc)		((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE		(2UL << 12)
#define QM_EQC_PHASE_SHIFT		16

#define QM_EQE_PHASE(eqe)		((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK			GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe)		((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT		17
#define QM_AEQE_CQN_MASK		GENMASK(15, 0)
#define QM_CQ_OVERFLOW			0
#define QM_EQ_OVERFLOW			1
#define QM_CQE_ERROR			2

#define QM_XQ_DEPTH_SHIFT		16
#define QM_XQ_DEPTH_MASK		GENMASK(15, 0)

#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

#define QM_DOORBELL_BASE_V1		0x340
#define QM_DB_CMD_SHIFT_V1		16
#define QM_DB_INDEX_SHIFT_V1		32
#define QM_DB_PRIORITY_SHIFT_V1		48
#define QM_PAGE_SIZE			0x0034
#define QM_QP_DB_INTERVAL		0x10000

#define QM_MEM_START_INIT		0x100040
#define QM_MEM_INIT_DONE		0x100044
#define QM_VFT_CFG_RDY			0x10006c
#define QM_VFT_CFG_OP_WR		0x100058
#define QM_VFT_CFG_TYPE			0x10005c
#define QM_VFT_CFG			0x100060
#define QM_VFT_CFG_OP_ENABLE		0x100054
#define QM_PM_CTRL			0x100148
#define QM_IDLE_DISABLE			BIT(9)

#define QM_VFT_CFG_DATA_L		0x100064
#define QM_VFT_CFG_DATA_H		0x100068
#define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT	28
#define QM_SQC_VFT_VALID		(1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT		45
#define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_CQC_VFT_VALID		(1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2	28
#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2		45
#define QM_SQC_VFT_NUM_MASK_V2		GENMASK(9, 0)

#define QM_ABNORMAL_INT_SOURCE		0x100000
#define QM_ABNORMAL_INT_MASK		0x100004
#define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
#define QM_ABNORMAL_INT_STATUS		0x100008
#define QM_ABNORMAL_INT_SET		0x10000c
#define QM_ABNORMAL_INF00		0x100010
#define QM_FIFO_OVERFLOW_TYPE		0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
#define QM_FIFO_OVERFLOW_VF		0x3f
#define QM_ABNORMAL_INF01		0x100014
#define QM_DB_TIMEOUT_TYPE		0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT	6
#define QM_DB_TIMEOUT_VF		0x3f
#define QM_RAS_CE_ENABLE		0x1000ec
#define QM_RAS_FE_ENABLE		0x1000f0
#define QM_RAS_NFE_ENABLE		0x1000f4
#define QM_RAS_CE_THRESHOLD		0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ		1
#define QM_OOO_SHUTDOWN_SEL		0x1040f8
#define QM_ECC_MBIT			BIT(2)
#define QM_DB_TIMEOUT			BIT(10)
#define QM_OF_FIFO_OF			BIT(11)

#define QM_RESET_WAIT_TIMEOUT		400
#define QM_PEH_VENDOR_ID		0x1000d8
#define ACC_VENDOR_ID_VALUE		0x5a5a
#define QM_PEH_DFX_INFO0		0x1000fc
#define QM_PEH_DFX_INFO1		0x100100
#define QM_PEH_DFX_MASK			(BIT(0) | BIT(2))
#define QM_PEH_MSI_FINISH_MASK		GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
#define ACC_PEH_MSI_DISABLE		GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
#define ACC_MASTER_TRANS_RETURN_RW	3
#define ACC_MASTER_TRANS_RETURN		0x300150
#define ACC_MASTER_GLOBAL_CTRL		0x300000
#define ACC_AM_CFG_PORT_WR_EN		0x30001c
#define QM_RAS_NFE_MBIT_DISABLE		~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS		0x300104
#define ACC_ROB_ECC_ERR_MULTPL		BIT(1)
#define QM_MSI_CAP_ENABLE		BIT(16)

/* interfunction communication */
#define QM_IFC_READY_STATUS		0x100128
#define QM_IFC_INT_SET_P		0x100130
#define QM_IFC_INT_CFG			0x100134
#define QM_IFC_INT_SOURCE_P		0x100138
#define QM_IFC_INT_SOURCE_V		0x0020
#define QM_IFC_INT_MASK			0x0024
#define QM_IFC_INT_STATUS		0x0028
#define QM_IFC_INT_SET_V		0x002C
#define QM_IFC_SEND_ALL_VFS		GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR		GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK		BIT(0)
#define QM_IFC_INT_DISABLE		BIT(0)
#define QM_IFC_INT_STATUS_MASK		BIT(0)
#define QM_IFC_INT_SET_MASK		BIT(0)
#define QM_WAIT_DST_ACK			10
#define QM_MAX_PF_WAIT_COUNT		10
#define QM_MAX_VF_WAIT_COUNT		40
#define QM_VF_RESET_WAIT_US		20000
#define QM_VF_RESET_WAIT_CNT		3000
#define QM_VF_RESET_WAIT_TIMEOUT_US	\
	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)

#define POLL_PERIOD			10
#define POLL_TIMEOUT			1000
#define WAIT_PERIOD_US_MAX		200
#define WAIT_PERIOD_US_MIN		100
#define MAX_WAIT_COUNTS			1000
#define QM_CACHE_WB_START		0x204
#define QM_CACHE_WB_DONE		0x208
#define QM_FUNC_CAPS_REG		0x3100
#define QM_CAPBILITY_VERSION		GENMASK(7, 0)

#define PCI_BAR_2			2
#define PCI_BAR_4			4
#define QMC_ALIGN(sz)			ALIGN(sz, 32)

#define QM_DBG_READ_LEN			256
#define QM_PCI_COMMAND_INVALID		~0
#define QM_RESET_STOP_TX_OFFSET		1
#define QM_RESET_STOP_RX_OFFSET		2

#define WAIT_PERIOD			20
#define REMOVE_WAIT_DELAY		10

#define QM_DRIVER_REMOVING		0
#define QM_RST_SCHED			1
#define QM_QOS_PARAM_NUM		2
#define QM_QOS_MAX_VAL			1000
#define QM_QOS_RATE			100
#define QM_QOS_EXPAND_RATE		1000
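
/*
 * Shaper (token bucket) parameters programmed into the shaper VFT entry:
 * CIR_B/CIR_U/CIR_S encode the committed information rate and CBS_B/CBS_S
 * the committed burst size; see qm_vft_data_cfg() and acc_shaper_para_calc()
 * below for how these fields are packed and derived.
 */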
#define QM_SHAPER_CIR_B_MASK		GENMASK(7, 0)
#define QM_SHAPER_CIR_U_MASK		GENMASK(10, 8)
#define QM_SHAPER_CIR_S_MASK		GENMASK(14, 11)
#define QM_SHAPER_FACTOR_CIR_U_SHIFT	8
#define QM_SHAPER_FACTOR_CIR_S_SHIFT	11
#define QM_SHAPER_FACTOR_CBS_B_SHIFT	15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT	19
#define QM_SHAPER_CBS_B			1
#define QM_SHAPER_VFT_OFFSET		6
#define QM_QOS_MIN_ERROR_RATE		5
#define QM_SHAPER_MIN_CBS_S		8
#define QM_QOS_TICK			0x300U
#define QM_QOS_DIVISOR_CLK		0x1f40U
#define QM_QOS_MAX_CIR_B		200
#define QM_QOS_MIN_CIR_B		100
#define QM_QOS_MAX_CIR_U		6
#define QM_AUTOSUSPEND_DELAY		3000

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT)	| \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)	| \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)	| \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
	((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT)	| \
	((orders) << QM_SQ_ORDERS_SHIFT)	| \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT)	| \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT)	| \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)	| \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define INIT_QC_COMMON(qc, base, pasid) do {			\
	(qc)->head = 0;						\
	(qc)->tail = 0;						\
	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
	(qc)->dw3 = 0;						\
	(qc)->w8 = 0;						\
	(qc)->rsvd0 = 0;					\
	(qc)->pasid = cpu_to_le16(pasid);			\
	(qc)->w11 = 0;						\
	(qc)->rsvd1 = 0;					\
} while (0)

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
	SHAPER_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

enum qm_alg_type {
	ALG_TYPE_0,
	ALG_TYPE_1,
};

enum qm_mb_cmd {
	QM_PF_FLR_PREPARE = 0x01,
	QM_PF_SRST_PREPARE,
	QM_PF_RESET_DONE,
	QM_VF_PREPARE_DONE,
	QM_VF_PREPARE_FAIL,
	QM_VF_START_DONE,
	QM_VF_START_FAIL,
	QM_PF_SET_QOS,
	QM_VF_GET_QOS,
};

enum qm_basic_type {
	QM_TOTAL_QP_NUM_CAP = 0x0,
	QM_FUNC_MAX_QP_CAP,
	QM_XEQ_DEPTH_CAP,
	QM_QP_DEPTH_CAP,
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
	QM_PF_IRQ_NUM_CAP,
	QM_VF_IRQ_NUM_CAP,
};

static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
	{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
	{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
	{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
};

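/*
 * Per-version basic capability table. Each entry is
 * {cap type, register offset, shift, mask, V1 value, V2 value, V3 value};
 * hisi_qm_get_hw_info() either reads the register or falls back to the
 * per-version value, depending on the hardware version and @is_read.
 */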
static const struct hisi_qm_cap_info qm_basic_info[] = {
	{QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
	{QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
	{QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
	{QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
	{QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
	{QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
	{QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
	{QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
	{QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
	{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

/**
 * struct qm_hw_err - Structure describing the device errors
 * @list: hardware error list
 * @timestamp: timestamp when the error occurred
 */
struct qm_hw_err {
	struct list_head list;
	unsigned long long timestamp;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
	int (*set_msi)(struct hisi_qm *qm, bool set);
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
	{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
	{ /* sentinel */ }
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

static const char * const qp_s[] = {
	"none", "init", "start", "stop", "close",
};

struct qm_typical_qos_table {
	u32 start;
	u32 end;
	u32 val;
};

/* the qos step is 100 */
static struct qm_typical_qos_table shaper_cir_s[] = {
	{100, 100, 4},
	{200, 200, 3},
	{300, 500, 2},
	{600, 1000, 1},
	{1100, 100000, 0},
};

static struct qm_typical_qos_table shaper_cbs_s[] = {
	{100, 200, 9},
	{300, 500, 11},
	{600, 1000, 12},
	{1100, 10000, 16},
	{10100, 25000, 17},
	{25100, 50000, 18},
	{50100, 100000, 19}
};

static void qm_irqs_unregister(struct hisi_qm *qm);
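
/*
 * Allowed QM state transitions, as enforced by qm_avail_state():
 * QM_INIT -> QM_START or QM_CLOSE, QM_START -> QM_STOP,
 * QM_STOP -> QM_START or QM_CLOSE.
 */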
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
	enum qm_state curr = atomic_read(&qm->status.flags);
	bool avail = false;

	switch (curr) {
	case QM_INIT:
		if (new == QM_START || new == QM_CLOSE)
			avail = true;
		break;
	case QM_START:
		if (new == QM_STOP)
			avail = true;
		break;
	case QM_STOP:
		if (new == QM_CLOSE || new == QM_START)
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
		qm_s[curr], qm_s[new]);

	if (!avail)
		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
			 qm_s[curr], qm_s[new]);

	return avail;
}

static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
			      enum qp_state new)
{
	enum qm_state qm_curr = atomic_read(&qm->status.flags);
	enum qp_state qp_curr = 0;
	bool avail = false;

	if (qp)
		qp_curr = atomic_read(&qp->qp_status.flags);

	switch (new) {
	case QP_INIT:
		if (qm_curr == QM_START || qm_curr == QM_INIT)
			avail = true;
		break;
	case QP_START:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP))
			avail = true;
		break;
	case QP_STOP:
		if ((qm_curr == QM_START && qp_curr == QP_START) ||
		    (qp_curr == QP_INIT))
			avail = true;
		break;
	case QP_CLOSE:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	if (!avail)
		dev_warn(&qm->pdev->dev,
			 "Can not change qp state from %s to %s in QM %s\n",
			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	return avail;
}

static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static u32 qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
	u32 val, dev_val;

	if (qm->fun_type == QM_HW_VF)
		return false;

	val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
	dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;

	return val || dev_val;
}

static int qm_wait_reset_finish(struct hisi_qm *qm)
{
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * PF and VF on host do not support resetting at the
	 * same time on Kunpeng920.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}

static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		(0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), POLL_PERIOD, POLL_TIMEOUT);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);

/* 128 bit should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
	unsigned long tmp0 = 0, tmp1 = 0;

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		dma_wmb();
		return;
	}

	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dmb oshst\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
}

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	int ret;
	u32 val;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
		ret = -EBUSY;
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		ret = -ETIMEDOUT;
		goto mb_busy;
	}

	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
	if (val & QM_MB_STATUS_MASK) {
		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
		ret = -EIO;
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}

int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
		queue, cmd, (unsigned long long)dma_addr);

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static void qm_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_PM_CTRL);
	val |= QM_IDLE_DISABLE;
	writel(val, qm->io_base + QM_PM_CTRL);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

/**
 * hisi_qm_get_hw_info() - Get device information.
 * @qm: The qm from which to get information.
 * @info_table: Array for storing device information.
 * @index: Index in info_table.
 * @is_read: Whether to read the value from the register; if not, the
 *	     per-version default value is returned.
 *
 * This function returns the device information the caller needs.
 */
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read)
{
	u32 val;

	switch (qm->ver) {
	case QM_HW_V1:
		return info_table[index].v1_val;
	case QM_HW_V2:
		return info_table[index].v2_val;
	default:
		if (!is_read)
			return info_table[index].v3_val;

		val = readl(qm->io_base + info_table[index].offset);
		return (val >> info_table[index].shift) & info_table[index].mask;
	}
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);

static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
			     u16 *high_bits, enum qm_basic_type type)
{
	u32 depth;

	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
	*low_bits = depth & QM_XQ_DEPTH_MASK;
	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}

static u32 qm_get_irq_num(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);

	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}

static int qm_pm_get_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get_sync(%d).\n", ret);
		return ret;
	}

	return 0;
}

static void qm_pm_put_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == qp->cq_depth - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
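		/* not at the end of the ring yet: just advance the consumer head */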
		qp->qp_status.cq_head++;
	}
}

static void qm_poll_req_cb(struct hisi_qp *qp)
{
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	struct hisi_qm *qm = qp->qm;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		dma_rmb();
		qp->req_cb(qp, qp->sqe + qm->sqe_size *
			   le16_to_cpu(cqe->sq_head));
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 0);
		atomic_dec(&qp->qp_status.used);
	}

	/* set c_flag */
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}

static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
	struct hisi_qm *qm = poll_data->qm;
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	u16 eq_depth = qm->eq_depth;
	int eqe_num = 0;
	u16 cqn;

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data->qp_finish_id[eqe_num] = cqn;
		eqe_num++;

		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == (eq_depth >> 1) - 1)
			break;
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return eqe_num;
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm_poll_data *poll_data =
		container_of(work, struct hisi_qm_poll_data, work);
	struct hisi_qm *qm = poll_data->qm;
	struct hisi_qp *qp;
	int eqe_num, i;

	/* Get qp id of completed tasks and re-enable the interrupt. */
	eqe_num = qm_get_complete_eqe_num(poll_data);
	for (i = eqe_num - 1; i >= 0; i--) {
		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
			continue;

		if (qp->event_cb) {
			qp->event_cb(qp);
			continue;
		}

		if (likely(qp->req_cb))
			qm_poll_req_cb(qp);
	}
}

static bool do_qm_eq_irq(struct hisi_qm *qm)
{
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qm_poll_data *poll_data;
	u16 cqn;

	if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return false;

	if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data = &qm->poll_data[cqn];
		queue_work(qm->wq, &poll_data->work);

		return true;
	}

	return false;
}

static irqreturn_t qm_eq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	bool ret;

	ret = do_qm_eq_irq(qm);
	if (ret)
		return IRQ_HANDLED;

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_STATUS);
	val &= QM_IFC_INT_STATUS_MASK;
	if (!val)
		return IRQ_NONE;

	schedule_work(&qm->cmd_process);

	return IRQ_HANDLED;
}

static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
{
	u32 *addr;

	if (qp->is_in_kernel)
		return;

	addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
	*addr = 1;

	/* make sure setup is completed */
	smp_wmb();
}

static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
{
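	/* Flag TX stopped for user space, stop the qp, then flag RX stopped. */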
	struct hisi_qp *qp = &qm->qp_array[qp_id];

	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_stop_qp(qp);
	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
}

static void qm_reset_function(struct hisi_qm *qm)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (qm_check_dev_error(pf_qm))
		return;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(dev, "reset function not ready\n");
		return;
	}

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		dev_err(dev, "failed to stop qm when reset function\n");
		goto clear_bit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		dev_err(dev, "failed to start qm when reset function\n");

clear_bit:
	qm_reset_bit_clear(qm);
}

static irqreturn_t qm_aeq_thread(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;

		switch (type) {
		case QM_EQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
			qm_reset_function(qm);
			return IRQ_HANDLED;
		case QM_CQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
				qp_id);
			fallthrough;
		case QM_CQE_ERROR:
			qm_disable_qp(qm, qp_id);
			break;
		default:
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);
			break;
		}

		if (qm->status.aeq_head == aeq_depth - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);

	return IRQ_HANDLED;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size %lu is not supported, default to 4KB",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}

/*
 * acc_shaper_para_calc() - Get the IR value by the qos formula; the return
 * value is the calculated qos.
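 * For example (illustrative), plugging cir_b = 154, cir_u = 0, cir_s = 4 and
 * Tick = 0x300 (768) into the formula below gives
 * IR = 154 * 8000 / (768 * 16) = 100, i.e. 100 Mbps in integer arithmetic.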
 * the formula:
 *	IR = X Mbps if ir = 1 means IR = 100 Mbps, if ir = 10000 means = 10Gbps
 *
 *		IR_b * (2 ^ IR_u) * 8000
 * IR(Mbps) = -------------------------
 *		  Tick * (2 ^ IR_s)
 */
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
	       (QM_QOS_TICK * (1 << cir_s));
}

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cbs_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
			return shaper_cbs_s[i].val;
	}

	return QM_SHAPER_MIN_CBS_S;
}

static u32 acc_shaper_calc_cir_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cir_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
			return shaper_cir_s[i].val;
	}

	return 0;
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);
	cir_s = acc_shaper_calc_cir_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);

			error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
			if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
				factor->cir_b = cir_b;
				factor->cir_u = cir_u;
				factor->cir_s = cir_s;
				return 0;
			}
		}
	}

	return -EINVAL;
}

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number, struct qm_shaper_factor *factor)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE |
				      QM_SQC_VFT_SQC_SIZE |
				      QM_SQC_VFT_INDEX_NUMBER |
				      QM_SQC_VFT_VALID |
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE |
				      QM_CQC_VFT_SQC_SIZE |
				      QM_CQC_VFT_INDEX_NUMBER |
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		case SHAPER_VFT:
			if (factor) {
				tmp = factor->cir_b |
				      (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
				      (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
				      (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
				      (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	struct qm_shaper_factor *factor = NULL;
	unsigned int val;
	int ret;

	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		factor = &qm->factor[fun_num];

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
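	/*
	 * For the shaper VFT the algorithm type (passed in @base) is folded
	 * into the function number written to QM_VFT_CFG below.
	 */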
	if (type == SHAPER_VFT)
		fun_num |= base << QM_SHAPER_VFT_OFFSET;

	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number, factor);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
	u32 qos = qm->factor[fun_num].func_qos;
	int ret, i;

	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
		return ret;
	}
	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The base number of queue reuse for different alg type */
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

	/* init default shaper qos val */
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		ret = qm_shaper_init_vft(qm, fun_num);
		if (ret)
			goto back_sqc_cqc;
	}

	return 0;
back_sqc_cqc:
	for (i = SQC_VFT; i <= CQC_VFT; i++)
		qm_set_vft_common(qm, i, fun_num, 0, 0);

	return ret;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_V2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}

void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
			dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;
	void *ctx_addr;

	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx_addr)
		return ERR_PTR(-ENOMEM);

	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		dev_err(dev, "DMA mapping error!\n");
		kfree(ctx_addr);
		return ERR_PTR(-ENOMEM);
	}

	return ctx_addr;
}

void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
		      const void *ctx_addr, dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;

	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
	kfree(ctx_addr);
}

static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
}

static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}

static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE,
	       qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_cfg(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
	/* clear QM hw residual error source */
	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* configure error type */
	writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
	writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}

static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	/* enable close master ooo when hardware error happened */
	writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);

	/* disable close master ooo when hardware error happened */
	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
	const struct hisi_qm_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 reg_val, type, vf_num;
	int i;

	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
		err = &qm_hw_error[i];
		if (!(err->int_msk & error_status))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & QM_DB_TIMEOUT) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
			       QM_DB_TIMEOUT_TYPE_SHIFT;
			vf_num = reg_val & QM_DB_TIMEOUT_VF;
			dev_err(dev, "qm %s doorbell timeout in function %u\n",
				qm_db_timeout[type], vf_num);
		} else if (err->int_msk & QM_OF_FIFO_OF) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;

			if (type < ARRAY_SIZE(qm_fifo_overflow))
				dev_err(dev, "qm %s fifo overflow in function %u\n",
					qm_fifo_overflow[type], vf_num);
			else
				dev_err(dev, "unknown error type\n");
		}
	}
}

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
	u32 error_status, tmp;

	/* read err sts */
	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
	error_status = qm->error_mask & tmp;

	if (error_status) {
		if (error_status & QM_ECC_MBIT)
			qm->err_status.is_qm_ecc_mbit = true;

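		/* Log the reported errors; only those in qm_reset_mask need a reset. */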
		qm_log_hw_error(qm, error_status);
		if (error_status & qm->err_info.qm_reset_mask)
			return ACC_ERR_NEED_RESET;

		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	}

	return ACC_ERR_RECOVERED;
}

static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret)
		goto err_unlock;

	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
	       ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);

	val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
	val |= QM_IFC_INT_SOURCE_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
}

static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
	struct device *dev = &qm->pdev->dev;
	u32 cmd;
	u64 msg;
	int ret;

	ret = qm_get_mb_cmd(qm, &msg, vf_id);
	if (ret) {
		dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_VF_PREPARE_FAIL:
		dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
		break;
	case QM_VF_START_FAIL:
		dev_err(dev, "failed to start VF(%u)!\n", vf_id);
		break;
	case QM_VF_PREPARE_DONE:
	case QM_VF_START_DONE:
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
		break;
	}
}

static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	int cnt = 0;
	int ret = 0;
	u64 val;
	u32 i;

	if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
		return 0;

	while (true) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		/* All VFs have sent their commands to the PF, stop waiting */
		if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
			break;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			ret = -EBUSY;
			break;
		}

		msleep(QM_WAIT_DST_ACK);
	}

	/* PF checks the VFs' messages */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			qm_handle_vf_msg(qm, i);
		else
			dev_err(dev, "VF(%u) not ping PF!\n", i);
	}

	/* PF clears the interrupt to ack the VFs */
	qm_clear_cmd_interrupt(qm, val);

	return ret;
}

static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_CFG);
	val &= ~QM_IFC_SEND_ALL_VFS;
	val |= fun_num;
	writel(val, qm->io_base + QM_IFC_INT_CFG);

	val = readl(qm->io_base + QM_IFC_INT_SET_P);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_P);
}

static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_SET_V);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_V);
}

static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
{
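	/*
	 * Send the command to the VF via the mailbox, raise the VF's
	 * interfunction interrupt, then poll QM_IFC_READY_STATUS until the
	 * VF clears its bit (or the wait count runs out).
	 */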
	struct device *dev = &qm->pdev->dev;
	struct qm_mailbox mailbox;
	int cnt = 0;
	u64 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
		goto err_unlock;
	}

	qm_trigger_vf_interrupt(qm, fun_num);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* if the VF responds, the PF has notified the VF successfully. */
		if (!(val & BIT(fun_num)))
			goto err_unlock;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
			ret = -ETIMEDOUT;
			break;
		}
	}

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	struct qm_mailbox mailbox;
	u64 val = 0;
	int cnt = 0;
	int ret;
	u32 i;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
	mutex_lock(&qm->mailbox_lock);
	/* PF sends command to all VFs by mailbox */
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to VFs!\n");
		mutex_unlock(&qm->mailbox_lock);
		return ret;
	}

	qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If all VFs have acked, the PF has notified the VFs successfully. */
		if (!(val & GENMASK(vfs_num, 1))) {
			mutex_unlock(&qm->mailbox_lock);
			return 0;
		}

		if (++cnt > QM_MAX_PF_WAIT_COUNT)
			break;
	}

	mutex_unlock(&qm->mailbox_lock);

	/* Check which VFs timed out responding. */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			dev_err(dev, "failed to get response from VF(%u)!\n", i);
	}

	return -ETIMEDOUT;
}

static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
{
	struct qm_mailbox mailbox;
	int cnt = 0;
	u32 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
		goto unlock;
	}

	qm_trigger_pf_interrupt(qm);
	/* Waiting for PF response */
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readl(qm->io_base + QM_IFC_INT_SET_V);
		if (!(val & QM_IFC_INT_STATUS_MASK))
			break;

		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
			ret = -ETIMEDOUT;
			break;
		}
	}

unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_stop_qp(struct hisi_qp *qp)
{
	return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
}

static int qm_set_msi(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;

	if (set) {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       0);
	} else {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       ACC_PEH_MSI_DISABLE);
		if (qm->err_status.is_qm_ecc_mbit ||
		    qm->err_status.is_dev_ecc_mbit)
			return 0;

		mdelay(1);
		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
			return -EFAULT;
	}

	return 0;
}

static void qm_wait_msi_finish(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 cmd = ~0;
	int cnt = 0;
	u32 val;
	int ret;

	while (true) {
		pci_read_config_dword(pdev, pdev->msi_cap +
				      PCI_MSI_PENDING_64, &cmd);
		if (!cmd)
			break;

		if (++cnt > MAX_WAIT_COUNTS) {
			pci_warn(pdev, "failed to empty MSI PENDING!\n");
			break;
		}

		udelay(1);
	}

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
					 val, !(val & QM_PEH_DFX_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to empty PEH MSI!\n");

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
					 val, !(val & QM_PEH_MSI_FINISH_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to finish MSI operation!\n");
}

static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	int ret = -ETIMEDOUT;
	u32 cmd, i;

	pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
	if (set)
		cmd |= QM_MSI_CAP_ENABLE;
	else
		cmd &= ~QM_MSI_CAP_ENABLE;

	pci_write_config_dword(pdev, pdev->msi_cap, cmd);
	if (set) {
		for (i = 0; i < MAX_WAIT_COUNTS; i++) {
			pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
			if (cmd & QM_MSI_CAP_ENABLE)
				return 0;

			udelay(1);
		}
	} else {
		udelay(WAIT_PERIOD_US_MIN);
		qm_wait_msi_finish(qm);
		ret = 0;
	}

	return ret;
}

static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
	.qm_db = qm_db_v1,
	.hw_error_init = qm_hw_error_init_v1,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v2,
	.hw_error_uninit = qm_hw_error_uninit_v2,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v3,
	.hw_error_uninit = qm_hw_error_uninit_v3,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi_v3,
};

static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;

	if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
		return NULL;

	return qp->sqe + sq_tail * qp->qm->sqe_size;
}

static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
{
	u64 *addr;

	/* Use last 64 bits of DUS to reset status. */
	addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
	*addr = 0;
}

static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int qp_id;

	if (!qm_qp_avail_state(qm, NULL, QP_INIT))
		return ERR_PTR(-EPERM);

	if (qm->qp_in_used == qm->qp_num) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
	if (qp_id < 0) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp = &qm->qp_array[qp_id];
	hisi_qm_unset_hw_reset(qp);
	memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);

	qp->event_cb = NULL;
	qp->req_cb = NULL;
	qp->qp_id = qp_id;
	qp->alg_type = alg_type;
	qp->is_in_kernel = true;
	qm->qp_in_used++;
	atomic_set(&qp->qp_status.flags, QP_INIT);

	return qp;
}

/**
 * hisi_qm_create_qp() - Create a queue pair from qm.
 * @qm: The qm we create a qp from.
 * @alg_type: Accelerator specific algorithm type in sqc.
 *
 * Return the created qp, or a negative error code on failure.
 */
static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
	struct hisi_qp *qp;
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret)
		return ERR_PTR(ret);

	down_write(&qm->qps_lock);
	qp = qm_create_qp_nolock(qm, alg_type);
	up_write(&qm->qps_lock);

	if (IS_ERR(qp))
		qm_pm_put_sync(qm);

	return qp;
}

/**
 * hisi_qm_release_qp() - Release a qp back to its qm.
 * @qp: The qp we want to release.
 *
 * This function releases the resources of a qp.
 */
static void hisi_qm_release_qp(struct hisi_qp *qp)
{
	struct hisi_qm *qm = qp->qm;

	down_write(&qm->qps_lock);

	if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
		up_write(&qm->qps_lock);
		return;
	}

	qm->qp_in_used--;
	idr_remove(&qm->qp_idr, qp->qp_id);

	up_write(&qm->qps_lock);

	qm_pm_put_sync(qm);
}

static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	enum qm_hw_ver ver = qm->ver;
	struct qm_sqc *sqc;
	dma_addr_t sqc_dma;
	int ret;

	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
	if (!sqc)
		return -ENOMEM;

	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
	if (ver == QM_HW_V1) {
		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
		sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
	} else {
		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
		sqc->w8 = 0; /* rand_qc */
	}
	sqc->cq_num = cpu_to_le16(qp_id);
	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
				       QM_QC_PASID_ENABLE_SHIFT);

	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sqc_dma)) {
		kfree(sqc);
		return -ENOMEM;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
	kfree(sqc);

	return ret;
}

static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	enum qm_hw_ver ver = qm->ver;
	struct qm_cqc *cqc;
	dma_addr_t cqc_dma;
	int ret;

	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
	if (!cqc)
		return -ENOMEM;

	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
	if (ver == QM_HW_V1) {
		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
							QM_QC_CQE_SIZE));
		cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
	} else {
		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
		cqc->w8 = 0; /* rand_qc */
	}
	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);

	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cqc_dma)) {
		kfree(cqc);
		return -ENOMEM;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
	kfree(cqc);

	return ret;
}

static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	int ret;

	qm_init_qp_status(qp);

	ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	return qm_cq_ctx_cfg(qp, qp_id, pasid);
}

static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	int qp_id = qp->qp_id;
	u32 pasid = arg;
	int ret;

	if (!qm_qp_avail_state(qm, qp, QP_START))
		return -EPERM;

	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
	if (ret)
(ret) 2050 return ret; 2051 2052 atomic_set(&qp->qp_status.flags, QP_START); 2053 dev_dbg(dev, "queue %d started\n", qp_id); 2054 2055 return 0; 2056 } 2057 2058 /** 2059 * hisi_qm_start_qp() - Start a qp into running. 2060 * @qp: The qp we want to start to run. 2061 * @arg: Accelerator specific argument. 2062 * 2063 * After this function, qp can receive request from user. Return 0 if 2064 * successful, negative error code if failed. 2065 */ 2066 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) 2067 { 2068 struct hisi_qm *qm = qp->qm; 2069 int ret; 2070 2071 down_write(&qm->qps_lock); 2072 ret = qm_start_qp_nolock(qp, arg); 2073 up_write(&qm->qps_lock); 2074 2075 return ret; 2076 } 2077 EXPORT_SYMBOL_GPL(hisi_qm_start_qp); 2078 2079 /** 2080 * qp_stop_fail_cb() - call request cb. 2081 * @qp: stopped failed qp. 2082 * 2083 * Callback function should be called whether task completed or not. 2084 */ 2085 static void qp_stop_fail_cb(struct hisi_qp *qp) 2086 { 2087 int qp_used = atomic_read(&qp->qp_status.used); 2088 u16 cur_tail = qp->qp_status.sq_tail; 2089 u16 sq_depth = qp->sq_depth; 2090 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; 2091 struct hisi_qm *qm = qp->qm; 2092 u16 pos; 2093 int i; 2094 2095 for (i = 0; i < qp_used; i++) { 2096 pos = (i + cur_head) % sq_depth; 2097 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); 2098 atomic_dec(&qp->qp_status.used); 2099 } 2100 } 2101 2102 /** 2103 * qm_drain_qp() - Drain a qp. 2104 * @qp: The qp we want to drain. 2105 * 2106 * Determine whether the queue is cleared by judging the tail pointers of 2107 * sq and cq. 2108 */ 2109 static int qm_drain_qp(struct hisi_qp *qp) 2110 { 2111 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); 2112 struct hisi_qm *qm = qp->qm; 2113 struct device *dev = &qm->pdev->dev; 2114 struct qm_sqc *sqc; 2115 struct qm_cqc *cqc; 2116 dma_addr_t dma_addr; 2117 int ret = 0, i = 0; 2118 void *addr; 2119 2120 /* No need to judge if master OOO is blocked. 
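 * When the device has already reported a hardware error, the sqc/cqc dump
 * below could not complete anyway, so the drain is skipped and success is
 * returned; the reset flow is expected to clean up the queues.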
*/ 2121 if (qm_check_dev_error(qm)) 2122 return 0; 2123 2124 /* Kunpeng930 supports drain qp by device */ 2125 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { 2126 ret = qm_stop_qp(qp); 2127 if (ret) 2128 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); 2129 return ret; 2130 } 2131 2132 addr = hisi_qm_ctx_alloc(qm, size, &dma_addr); 2133 if (IS_ERR(addr)) { 2134 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); 2135 return -ENOMEM; 2136 } 2137 2138 while (++i) { 2139 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); 2140 if (ret) { 2141 dev_err_ratelimited(dev, "Failed to dump sqc!\n"); 2142 break; 2143 } 2144 sqc = addr; 2145 2146 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), 2147 qp->qp_id); 2148 if (ret) { 2149 dev_err_ratelimited(dev, "Failed to dump cqc!\n"); 2150 break; 2151 } 2152 cqc = addr + sizeof(struct qm_sqc); 2153 2154 if ((sqc->tail == cqc->tail) && 2155 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) 2156 break; 2157 2158 if (i == MAX_WAIT_COUNTS) { 2159 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); 2160 ret = -EBUSY; 2161 break; 2162 } 2163 2164 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); 2165 } 2166 2167 hisi_qm_ctx_free(qm, size, addr, &dma_addr); 2168 2169 return ret; 2170 } 2171 2172 static int qm_stop_qp_nolock(struct hisi_qp *qp) 2173 { 2174 struct device *dev = &qp->qm->pdev->dev; 2175 int ret; 2176 2177 /* 2178 * It is allowed to stop and release a qp during reset. If the qp is 2179 * stopped during reset but still needs to be released afterwards, the 2180 * is_resetting flag should be cleared so that this qp will not be 2181 * restarted after the reset. 2182 */ 2183 if (atomic_read(&qp->qp_status.flags) == QP_STOP) { 2184 qp->is_resetting = false; 2185 return 0; 2186 } 2187 2188 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) 2189 return -EPERM; 2190 2191 atomic_set(&qp->qp_status.flags, QP_STOP); 2192 2193 ret = qm_drain_qp(qp); 2194 if (ret) 2195 dev_err(dev, "Failed to drain out data for stopping!\n"); 2196 2197 2198 flush_workqueue(qp->qm->wq); 2199 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) 2200 qp_stop_fail_cb(qp); 2201 2202 dev_dbg(dev, "stop queue %u!", qp->qp_id); 2203 2204 return 0; 2205 } 2206 2207 /** 2208 * hisi_qm_stop_qp() - Stop a qp in qm. 2209 * @qp: The qp we want to stop. 2210 * 2211 * This function is the reverse of hisi_qm_start_qp. Return 0 if successful. 2212 */ 2213 int hisi_qm_stop_qp(struct hisi_qp *qp) 2214 { 2215 int ret; 2216 2217 down_write(&qp->qm->qps_lock); 2218 ret = qm_stop_qp_nolock(qp); 2219 up_write(&qp->qm->qps_lock); 2220 2221 return ret; 2222 } 2223 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); 2224 2225 /** 2226 * hisi_qp_send() - Queue up a task in the hardware queue. 2227 * @qp: The qp in which to put the message. 2228 * @msg: The message. 2229 * 2230 * This function will return -EBUSY if the qp is currently full, and -EAGAIN 2231 * if the qm related to the qp is resetting. 2232 * 2233 * Note: This function may run concurrently with qm_irq_thread and ACC reset. 2234 * It has no race with qm_irq_thread. However, during hisi_qp_send, an ACC 2235 * reset may happen; we hold no lock here for performance reasons. This may 2236 * cause the current qm_db write to fail, or the sent sqe may not be received. 2237 * The QM sync/async receive functions should handle such error sqes. The ACC 2238 * reset done function should clear the used sqes to 0.
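 *
 * A minimal caller sketch (illustrative only; "qp" would come from
 * hisi_qm_alloc_qps_node() and "sqe" is an accelerator-specific descriptor,
 * both hypothetical here):
 *
 *	ret = hisi_qp_send(qp, sqe);
 *	if (ret == -EBUSY)
 *		... the submission queue is full, back off ...
 *	else if (ret == -EAGAIN)
 *		... the qp or its qm is stopping or resetting, resubmit later ...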
2239 */ 2240 int hisi_qp_send(struct hisi_qp *qp, const void *msg) 2241 { 2242 struct hisi_qp_status *qp_status = &qp->qp_status; 2243 u16 sq_tail = qp_status->sq_tail; 2244 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; 2245 void *sqe = qm_get_avail_sqe(qp); 2246 2247 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || 2248 atomic_read(&qp->qm->status.flags) == QM_STOP || 2249 qp->is_resetting)) { 2250 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); 2251 return -EAGAIN; 2252 } 2253 2254 if (!sqe) 2255 return -EBUSY; 2256 2257 memcpy(sqe, msg, qp->qm->sqe_size); 2258 2259 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); 2260 atomic_inc(&qp->qp_status.used); 2261 qp_status->sq_tail = sq_tail_next; 2262 2263 return 0; 2264 } 2265 EXPORT_SYMBOL_GPL(hisi_qp_send); 2266 2267 static void hisi_qm_cache_wb(struct hisi_qm *qm) 2268 { 2269 unsigned int val; 2270 2271 if (qm->ver == QM_HW_V1) 2272 return; 2273 2274 writel(0x1, qm->io_base + QM_CACHE_WB_START); 2275 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, 2276 val, val & BIT(0), POLL_PERIOD, 2277 POLL_TIMEOUT)) 2278 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); 2279 } 2280 2281 static void qm_qp_event_notifier(struct hisi_qp *qp) 2282 { 2283 wake_up_interruptible(&qp->uacce_q->wait); 2284 } 2285 2286 /* This function returns free number of qp in qm. */ 2287 static int hisi_qm_get_available_instances(struct uacce_device *uacce) 2288 { 2289 struct hisi_qm *qm = uacce->priv; 2290 int ret; 2291 2292 down_read(&qm->qps_lock); 2293 ret = qm->qp_num - qm->qp_in_used; 2294 up_read(&qm->qps_lock); 2295 2296 return ret; 2297 } 2298 2299 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) 2300 { 2301 int i; 2302 2303 for (i = 0; i < qm->qp_num; i++) 2304 qm_set_qp_disable(&qm->qp_array[i], offset); 2305 } 2306 2307 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, 2308 unsigned long arg, 2309 struct uacce_queue *q) 2310 { 2311 struct hisi_qm *qm = uacce->priv; 2312 struct hisi_qp *qp; 2313 u8 alg_type = 0; 2314 2315 qp = hisi_qm_create_qp(qm, alg_type); 2316 if (IS_ERR(qp)) 2317 return PTR_ERR(qp); 2318 2319 q->priv = qp; 2320 q->uacce = uacce; 2321 qp->uacce_q = q; 2322 qp->event_cb = qm_qp_event_notifier; 2323 qp->pasid = arg; 2324 qp->is_in_kernel = false; 2325 2326 return 0; 2327 } 2328 2329 static void hisi_qm_uacce_put_queue(struct uacce_queue *q) 2330 { 2331 struct hisi_qp *qp = q->priv; 2332 2333 hisi_qm_release_qp(qp); 2334 } 2335 2336 /* map sq/cq/doorbell to user space */ 2337 static int hisi_qm_uacce_mmap(struct uacce_queue *q, 2338 struct vm_area_struct *vma, 2339 struct uacce_qfile_region *qfr) 2340 { 2341 struct hisi_qp *qp = q->priv; 2342 struct hisi_qm *qm = qp->qm; 2343 resource_size_t phys_base = qm->db_phys_base + 2344 qp->qp_id * qm->db_interval; 2345 size_t sz = vma->vm_end - vma->vm_start; 2346 struct pci_dev *pdev = qm->pdev; 2347 struct device *dev = &pdev->dev; 2348 unsigned long vm_pgoff; 2349 int ret; 2350 2351 switch (qfr->type) { 2352 case UACCE_QFRT_MMIO: 2353 if (qm->ver == QM_HW_V1) { 2354 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) 2355 return -EINVAL; 2356 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 2357 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + 2358 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) 2359 return -EINVAL; 2360 } else { 2361 if (sz > qm->db_interval) 2362 return -EINVAL; 2363 } 2364 2365 vm_flags_set(vma, VM_IO); 2366 2367 return remap_pfn_range(vma, vma->vm_start, 2368 phys_base >> PAGE_SHIFT, 
2369 sz, pgprot_noncached(vma->vm_page_prot)); 2370 case UACCE_QFRT_DUS: 2371 if (sz != qp->qdma.size) 2372 return -EINVAL; 2373 2374 /* 2375 * dma_mmap_coherent() requires vm_pgoff as 0 2376 * restore vm_pfoff to initial value for mmap() 2377 */ 2378 vm_pgoff = vma->vm_pgoff; 2379 vma->vm_pgoff = 0; 2380 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, 2381 qp->qdma.dma, sz); 2382 vma->vm_pgoff = vm_pgoff; 2383 return ret; 2384 2385 default: 2386 return -EINVAL; 2387 } 2388 } 2389 2390 static int hisi_qm_uacce_start_queue(struct uacce_queue *q) 2391 { 2392 struct hisi_qp *qp = q->priv; 2393 2394 return hisi_qm_start_qp(qp, qp->pasid); 2395 } 2396 2397 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) 2398 { 2399 hisi_qm_stop_qp(q->priv); 2400 } 2401 2402 static int hisi_qm_is_q_updated(struct uacce_queue *q) 2403 { 2404 struct hisi_qp *qp = q->priv; 2405 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 2406 int updated = 0; 2407 2408 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 2409 /* make sure to read data from memory */ 2410 dma_rmb(); 2411 qm_cq_head_update(qp); 2412 cqe = qp->cqe + qp->qp_status.cq_head; 2413 updated = 1; 2414 } 2415 2416 return updated; 2417 } 2418 2419 static void qm_set_sqctype(struct uacce_queue *q, u16 type) 2420 { 2421 struct hisi_qm *qm = q->uacce->priv; 2422 struct hisi_qp *qp = q->priv; 2423 2424 down_write(&qm->qps_lock); 2425 qp->alg_type = type; 2426 up_write(&qm->qps_lock); 2427 } 2428 2429 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, 2430 unsigned long arg) 2431 { 2432 struct hisi_qp *qp = q->priv; 2433 struct hisi_qp_info qp_info; 2434 struct hisi_qp_ctx qp_ctx; 2435 2436 if (cmd == UACCE_CMD_QM_SET_QP_CTX) { 2437 if (copy_from_user(&qp_ctx, (void __user *)arg, 2438 sizeof(struct hisi_qp_ctx))) 2439 return -EFAULT; 2440 2441 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) 2442 return -EINVAL; 2443 2444 qm_set_sqctype(q, qp_ctx.qc_type); 2445 qp_ctx.id = qp->qp_id; 2446 2447 if (copy_to_user((void __user *)arg, &qp_ctx, 2448 sizeof(struct hisi_qp_ctx))) 2449 return -EFAULT; 2450 2451 return 0; 2452 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { 2453 if (copy_from_user(&qp_info, (void __user *)arg, 2454 sizeof(struct hisi_qp_info))) 2455 return -EFAULT; 2456 2457 qp_info.sqe_size = qp->qm->sqe_size; 2458 qp_info.sq_depth = qp->sq_depth; 2459 qp_info.cq_depth = qp->cq_depth; 2460 2461 if (copy_to_user((void __user *)arg, &qp_info, 2462 sizeof(struct hisi_qp_info))) 2463 return -EFAULT; 2464 2465 return 0; 2466 } 2467 2468 return -EINVAL; 2469 } 2470 2471 /** 2472 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device 2473 * according to user's configuration of error threshold. 2474 * @qm: the uacce device 2475 */ 2476 static int qm_hw_err_isolate(struct hisi_qm *qm) 2477 { 2478 struct qm_hw_err *err, *tmp, *hw_err; 2479 struct qm_err_isolate *isolate; 2480 u32 count = 0; 2481 2482 isolate = &qm->isolate_data; 2483 2484 #define SECONDS_PER_HOUR 3600 2485 2486 /* All the hw errs are processed by PF driver */ 2487 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) 2488 return 0; 2489 2490 hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL); 2491 if (!hw_err) 2492 return -ENOMEM; 2493 2494 /* 2495 * Time-stamp every slot AER error. Then check the AER error log when the 2496 * next device AER error occurred. if the device slot AER error count exceeds 2497 * the setting error threshold in one hour, the isolated state will be set 2498 * to true. 
And the AER error logs that exceed one hour will be cleared. 2499 */ 2500 mutex_lock(&isolate->isolate_lock); 2501 hw_err->timestamp = jiffies; 2502 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { 2503 if ((hw_err->timestamp - err->timestamp) / HZ > 2504 SECONDS_PER_HOUR) { 2505 list_del(&err->list); 2506 kfree(err); 2507 } else { 2508 count++; 2509 } 2510 } 2511 list_add(&hw_err->list, &isolate->qm_hw_errs); 2512 mutex_unlock(&isolate->isolate_lock); 2513 2514 if (count >= isolate->err_threshold) 2515 isolate->is_isolate = true; 2516 2517 return 0; 2518 } 2519 2520 static void qm_hw_err_destroy(struct hisi_qm *qm) 2521 { 2522 struct qm_hw_err *err, *tmp; 2523 2524 mutex_lock(&qm->isolate_data.isolate_lock); 2525 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { 2526 list_del(&err->list); 2527 kfree(err); 2528 } 2529 mutex_unlock(&qm->isolate_data.isolate_lock); 2530 } 2531 2532 static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce) 2533 { 2534 struct hisi_qm *qm = uacce->priv; 2535 struct hisi_qm *pf_qm; 2536 2537 if (uacce->is_vf) 2538 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2539 else 2540 pf_qm = qm; 2541 2542 return pf_qm->isolate_data.is_isolate ? 2543 UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL; 2544 } 2545 2546 static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num) 2547 { 2548 struct hisi_qm *qm = uacce->priv; 2549 2550 /* Must be set by PF */ 2551 if (uacce->is_vf) 2552 return -EPERM; 2553 2554 if (qm->isolate_data.is_isolate) 2555 return -EPERM; 2556 2557 qm->isolate_data.err_threshold = num; 2558 2559 /* After the policy is updated, need to reset the hardware err list */ 2560 qm_hw_err_destroy(qm); 2561 2562 return 0; 2563 } 2564 2565 static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce) 2566 { 2567 struct hisi_qm *qm = uacce->priv; 2568 struct hisi_qm *pf_qm; 2569 2570 if (uacce->is_vf) { 2571 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2572 return pf_qm->isolate_data.err_threshold; 2573 } 2574 2575 return qm->isolate_data.err_threshold; 2576 } 2577 2578 static const struct uacce_ops uacce_qm_ops = { 2579 .get_available_instances = hisi_qm_get_available_instances, 2580 .get_queue = hisi_qm_uacce_get_queue, 2581 .put_queue = hisi_qm_uacce_put_queue, 2582 .start_queue = hisi_qm_uacce_start_queue, 2583 .stop_queue = hisi_qm_uacce_stop_queue, 2584 .mmap = hisi_qm_uacce_mmap, 2585 .ioctl = hisi_qm_uacce_ioctl, 2586 .is_q_updated = hisi_qm_is_q_updated, 2587 .get_isolate_state = hisi_qm_get_isolate_state, 2588 .isolate_err_threshold_write = hisi_qm_isolate_threshold_write, 2589 .isolate_err_threshold_read = hisi_qm_isolate_threshold_read, 2590 }; 2591 2592 static void qm_remove_uacce(struct hisi_qm *qm) 2593 { 2594 struct uacce_device *uacce = qm->uacce; 2595 2596 if (qm->use_sva) { 2597 qm_hw_err_destroy(qm); 2598 uacce_remove(uacce); 2599 qm->uacce = NULL; 2600 } 2601 } 2602 2603 static int qm_alloc_uacce(struct hisi_qm *qm) 2604 { 2605 struct pci_dev *pdev = qm->pdev; 2606 struct uacce_device *uacce; 2607 unsigned long mmio_page_nr; 2608 unsigned long dus_page_nr; 2609 u16 sq_depth, cq_depth; 2610 struct uacce_interface interface = { 2611 .flags = UACCE_DEV_SVA, 2612 .ops = &uacce_qm_ops, 2613 }; 2614 int ret; 2615 2616 ret = strscpy(interface.name, dev_driver_string(&pdev->dev), 2617 sizeof(interface.name)); 2618 if (ret < 0) 2619 return -ENAMETOOLONG; 2620 2621 uacce = uacce_alloc(&pdev->dev, &interface); 2622 if (IS_ERR(uacce)) 2623 return PTR_ERR(uacce); 2624 2625 if 
(uacce->flags & UACCE_DEV_SVA) { 2626 qm->use_sva = true; 2627 } else { 2628 /* only consider sva case */ 2629 qm_remove_uacce(qm); 2630 return -EINVAL; 2631 } 2632 2633 uacce->is_vf = pdev->is_virtfn; 2634 uacce->priv = qm; 2635 2636 if (qm->ver == QM_HW_V1) 2637 uacce->api_ver = HISI_QM_API_VER_BASE; 2638 else if (qm->ver == QM_HW_V2) 2639 uacce->api_ver = HISI_QM_API_VER2_BASE; 2640 else 2641 uacce->api_ver = HISI_QM_API_VER3_BASE; 2642 2643 if (qm->ver == QM_HW_V1) 2644 mmio_page_nr = QM_DOORBELL_PAGE_NR; 2645 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 2646 mmio_page_nr = QM_DOORBELL_PAGE_NR + 2647 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; 2648 else 2649 mmio_page_nr = qm->db_interval / PAGE_SIZE; 2650 2651 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 2652 2653 /* Add one more page for device or qp status */ 2654 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + 2655 sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >> 2656 PAGE_SHIFT; 2657 2658 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; 2659 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; 2660 2661 qm->uacce = uacce; 2662 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); 2663 mutex_init(&qm->isolate_data.isolate_lock); 2664 2665 return 0; 2666 } 2667 2668 /** 2669 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If 2670 * there are users on the QM, return failure without doing anything. 2671 * @qm: The qm that needs to be frozen. 2672 * 2673 * This function freezes the QM, after which SRIOV can be disabled. 2674 */ 2675 static int qm_frozen(struct hisi_qm *qm) 2676 { 2677 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) 2678 return 0; 2679 2680 down_write(&qm->qps_lock); 2681 2682 if (!qm->qp_in_used) { 2683 qm->qp_in_used = qm->qp_num; 2684 up_write(&qm->qps_lock); 2685 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); 2686 return 0; 2687 } 2688 2689 up_write(&qm->qps_lock); 2690 2691 return -EBUSY; 2692 } 2693 2694 static int qm_try_frozen_vfs(struct pci_dev *pdev, 2695 struct hisi_qm_list *qm_list) 2696 { 2697 struct hisi_qm *qm, *vf_qm; 2698 struct pci_dev *dev; 2699 int ret = 0; 2700 2701 if (!qm_list || !pdev) 2702 return -EINVAL; 2703 2704 /* Try to freeze all the VFs before disabling SRIOV */ 2705 mutex_lock(&qm_list->lock); 2706 list_for_each_entry(qm, &qm_list->list, list) { 2707 dev = qm->pdev; 2708 if (dev == pdev) 2709 continue; 2710 if (pci_physfn(dev) == pdev) { 2711 vf_qm = pci_get_drvdata(dev); 2712 ret = qm_frozen(vf_qm); 2713 if (ret) 2714 goto frozen_fail; 2715 } 2716 } 2717 2718 frozen_fail: 2719 mutex_unlock(&qm_list->lock); 2720 2721 return ret; 2722 } 2723 2724 /** 2725 * hisi_qm_wait_task_finish() - Wait until the task is finished 2726 * when removing the driver. 2727 * @qm: The qm that needs to wait for its tasks to finish. 2728 * @qm_list: The list of all available devices.
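 *
 * This busy-waits: it sleeps WAIT_PERIOD milliseconds between checks until
 * this qm (and, on the PF, all of its VFs) can be frozen and until any
 * scheduled or running reset has finished.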
2729 */ 2730 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 2731 { 2732 while (qm_frozen(qm) || 2733 ((qm->fun_type == QM_HW_PF) && 2734 qm_try_frozen_vfs(qm->pdev, qm_list))) { 2735 msleep(WAIT_PERIOD); 2736 } 2737 2738 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || 2739 test_bit(QM_RESETTING, &qm->misc_ctl)) 2740 msleep(WAIT_PERIOD); 2741 2742 udelay(REMOVE_WAIT_DELAY); 2743 } 2744 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); 2745 2746 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) 2747 { 2748 struct device *dev = &qm->pdev->dev; 2749 struct qm_dma *qdma; 2750 int i; 2751 2752 for (i = num - 1; i >= 0; i--) { 2753 qdma = &qm->qp_array[i].qdma; 2754 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); 2755 kfree(qm->poll_data[i].qp_finish_id); 2756 } 2757 2758 kfree(qm->poll_data); 2759 kfree(qm->qp_array); 2760 } 2761 2762 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, 2763 u16 sq_depth, u16 cq_depth) 2764 { 2765 struct device *dev = &qm->pdev->dev; 2766 size_t off = qm->sqe_size * sq_depth; 2767 struct hisi_qp *qp; 2768 int ret = -ENOMEM; 2769 2770 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), 2771 GFP_KERNEL); 2772 if (!qm->poll_data[id].qp_finish_id) 2773 return -ENOMEM; 2774 2775 qp = &qm->qp_array[id]; 2776 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, 2777 GFP_KERNEL); 2778 if (!qp->qdma.va) 2779 goto err_free_qp_finish_id; 2780 2781 qp->sqe = qp->qdma.va; 2782 qp->sqe_dma = qp->qdma.dma; 2783 qp->cqe = qp->qdma.va + off; 2784 qp->cqe_dma = qp->qdma.dma + off; 2785 qp->qdma.size = dma_size; 2786 qp->sq_depth = sq_depth; 2787 qp->cq_depth = cq_depth; 2788 qp->qm = qm; 2789 qp->qp_id = id; 2790 2791 return 0; 2792 2793 err_free_qp_finish_id: 2794 kfree(qm->poll_data[id].qp_finish_id); 2795 return ret; 2796 } 2797 2798 static void hisi_qm_pre_init(struct hisi_qm *qm) 2799 { 2800 struct pci_dev *pdev = qm->pdev; 2801 2802 if (qm->ver == QM_HW_V1) 2803 qm->ops = &qm_hw_ops_v1; 2804 else if (qm->ver == QM_HW_V2) 2805 qm->ops = &qm_hw_ops_v2; 2806 else 2807 qm->ops = &qm_hw_ops_v3; 2808 2809 pci_set_drvdata(pdev, qm); 2810 mutex_init(&qm->mailbox_lock); 2811 init_rwsem(&qm->qps_lock); 2812 qm->qp_in_used = 0; 2813 qm->misc_ctl = false; 2814 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { 2815 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) 2816 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); 2817 } 2818 } 2819 2820 static void qm_cmd_uninit(struct hisi_qm *qm) 2821 { 2822 u32 val; 2823 2824 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2825 return; 2826 2827 val = readl(qm->io_base + QM_IFC_INT_MASK); 2828 val |= QM_IFC_INT_DISABLE; 2829 writel(val, qm->io_base + QM_IFC_INT_MASK); 2830 } 2831 2832 static void qm_cmd_init(struct hisi_qm *qm) 2833 { 2834 u32 val; 2835 2836 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2837 return; 2838 2839 /* Clear communication interrupt source */ 2840 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); 2841 2842 /* Enable pf to vf communication reg. 
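 * Clearing QM_IFC_INT_DISABLE in QM_IFC_INT_MASK unmasks the PF/VF mailbox
 * command interrupt for this function.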
*/ 2843 val = readl(qm->io_base + QM_IFC_INT_MASK); 2844 val &= ~QM_IFC_INT_DISABLE; 2845 writel(val, qm->io_base + QM_IFC_INT_MASK); 2846 } 2847 2848 static void qm_put_pci_res(struct hisi_qm *qm) 2849 { 2850 struct pci_dev *pdev = qm->pdev; 2851 2852 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 2853 iounmap(qm->db_io_base); 2854 2855 iounmap(qm->io_base); 2856 pci_release_mem_regions(pdev); 2857 } 2858 2859 static void hisi_qm_pci_uninit(struct hisi_qm *qm) 2860 { 2861 struct pci_dev *pdev = qm->pdev; 2862 2863 pci_free_irq_vectors(pdev); 2864 qm_put_pci_res(qm); 2865 pci_disable_device(pdev); 2866 } 2867 2868 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) 2869 { 2870 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) 2871 writel(state, qm->io_base + QM_VF_STATE); 2872 } 2873 2874 static void hisi_qm_unint_work(struct hisi_qm *qm) 2875 { 2876 destroy_workqueue(qm->wq); 2877 } 2878 2879 static void hisi_qm_memory_uninit(struct hisi_qm *qm) 2880 { 2881 struct device *dev = &qm->pdev->dev; 2882 2883 hisi_qp_memory_uninit(qm, qm->qp_num); 2884 if (qm->qdma.va) { 2885 hisi_qm_cache_wb(qm); 2886 dma_free_coherent(dev, qm->qdma.size, 2887 qm->qdma.va, qm->qdma.dma); 2888 } 2889 2890 idr_destroy(&qm->qp_idr); 2891 2892 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 2893 kfree(qm->factor); 2894 } 2895 2896 /** 2897 * hisi_qm_uninit() - Uninitialize qm. 2898 * @qm: The qm needed uninit. 2899 * 2900 * This function uninits qm related device resources. 2901 */ 2902 void hisi_qm_uninit(struct hisi_qm *qm) 2903 { 2904 qm_cmd_uninit(qm); 2905 hisi_qm_unint_work(qm); 2906 down_write(&qm->qps_lock); 2907 2908 if (!qm_avail_state(qm, QM_CLOSE)) { 2909 up_write(&qm->qps_lock); 2910 return; 2911 } 2912 2913 hisi_qm_memory_uninit(qm); 2914 hisi_qm_set_state(qm, QM_NOT_READY); 2915 up_write(&qm->qps_lock); 2916 2917 qm_irqs_unregister(qm); 2918 hisi_qm_pci_uninit(qm); 2919 if (qm->use_sva) { 2920 uacce_remove(qm->uacce); 2921 qm->uacce = NULL; 2922 } 2923 } 2924 EXPORT_SYMBOL_GPL(hisi_qm_uninit); 2925 2926 /** 2927 * hisi_qm_get_vft() - Get vft from a qm. 2928 * @qm: The qm we want to get its vft. 2929 * @base: The base number of queue in vft. 2930 * @number: The number of queues in vft. 2931 * 2932 * We can allocate multiple queues to a qm by configuring virtual function 2933 * table. We get related configures by this function. Normally, we call this 2934 * function in VF driver to get the queue information. 2935 * 2936 * qm hw v1 does not support this interface. 2937 */ 2938 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) 2939 { 2940 if (!base || !number) 2941 return -EINVAL; 2942 2943 if (!qm->ops->get_vft) { 2944 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); 2945 return -EINVAL; 2946 } 2947 2948 return qm->ops->get_vft(qm, base, number); 2949 } 2950 2951 /** 2952 * hisi_qm_set_vft() - Set vft to a qm. 2953 * @qm: The qm we want to set its vft. 2954 * @fun_num: The function number. 2955 * @base: The base number of queue in vft. 2956 * @number: The number of queues in vft. 2957 * 2958 * This function is alway called in PF driver, it is used to assign queues 2959 * among PF and VFs. 
2960 * 2961 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) 2962 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) 2963 * (VF function number 0x2) 2964 */ 2965 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 2966 u32 number) 2967 { 2968 u32 max_q_num = qm->ctrl_qp_num; 2969 2970 if (base >= max_q_num || number > max_q_num || 2971 (base + number) > max_q_num) 2972 return -EINVAL; 2973 2974 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); 2975 } 2976 2977 static void qm_init_eq_aeq_status(struct hisi_qm *qm) 2978 { 2979 struct hisi_qm_status *status = &qm->status; 2980 2981 status->eq_head = 0; 2982 status->aeq_head = 0; 2983 status->eqc_phase = true; 2984 status->aeqc_phase = true; 2985 } 2986 2987 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) 2988 { 2989 /* Clear eq/aeq interrupt source */ 2990 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); 2991 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 2992 2993 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); 2994 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); 2995 } 2996 2997 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) 2998 { 2999 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); 3000 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); 3001 } 3002 3003 static int qm_eq_ctx_cfg(struct hisi_qm *qm) 3004 { 3005 struct device *dev = &qm->pdev->dev; 3006 struct qm_eqc *eqc; 3007 dma_addr_t eqc_dma; 3008 int ret; 3009 3010 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); 3011 if (!eqc) 3012 return -ENOMEM; 3013 3014 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); 3015 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); 3016 if (qm->ver == QM_HW_V1) 3017 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); 3018 eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3019 3020 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), 3021 DMA_TO_DEVICE); 3022 if (dma_mapping_error(dev, eqc_dma)) { 3023 kfree(eqc); 3024 return -ENOMEM; 3025 } 3026 3027 ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); 3028 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); 3029 kfree(eqc); 3030 3031 return ret; 3032 } 3033 3034 static int qm_aeq_ctx_cfg(struct hisi_qm *qm) 3035 { 3036 struct device *dev = &qm->pdev->dev; 3037 struct qm_aeqc *aeqc; 3038 dma_addr_t aeqc_dma; 3039 int ret; 3040 3041 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL); 3042 if (!aeqc) 3043 return -ENOMEM; 3044 3045 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); 3046 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); 3047 aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3048 3049 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), 3050 DMA_TO_DEVICE); 3051 if (dma_mapping_error(dev, aeqc_dma)) { 3052 kfree(aeqc); 3053 return -ENOMEM; 3054 } 3055 3056 ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); 3057 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); 3058 kfree(aeqc); 3059 3060 return ret; 3061 } 3062 3063 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) 3064 { 3065 struct device *dev = &qm->pdev->dev; 3066 int ret; 3067 3068 qm_init_eq_aeq_status(qm); 3069 3070 ret = qm_eq_ctx_cfg(qm); 3071 if (ret) { 3072 dev_err(dev, "Set eqc failed!\n"); 3073 return ret; 3074 } 3075 3076 return qm_aeq_ctx_cfg(qm); 3077 } 3078 3079 static int __hisi_qm_start(struct hisi_qm *qm) 3080 { 3081 int ret; 3082 3083 WARN_ON(!qm->qdma.va); 3084 3085 if (qm->fun_type == QM_HW_PF) { 3086 ret = 
hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); 3087 if (ret) 3088 return ret; 3089 } 3090 3091 ret = qm_eq_aeq_ctx_cfg(qm); 3092 if (ret) 3093 return ret; 3094 3095 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); 3096 if (ret) 3097 return ret; 3098 3099 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); 3100 if (ret) 3101 return ret; 3102 3103 qm_init_prefetch(qm); 3104 qm_enable_eq_aeq_interrupts(qm); 3105 3106 return 0; 3107 } 3108 3109 /** 3110 * hisi_qm_start() - start qm 3111 * @qm: The qm to be started. 3112 * 3113 * This function starts a qm, then we can allocate qp from this qm. 3114 */ 3115 int hisi_qm_start(struct hisi_qm *qm) 3116 { 3117 struct device *dev = &qm->pdev->dev; 3118 int ret = 0; 3119 3120 down_write(&qm->qps_lock); 3121 3122 if (!qm_avail_state(qm, QM_START)) { 3123 up_write(&qm->qps_lock); 3124 return -EPERM; 3125 } 3126 3127 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); 3128 3129 if (!qm->qp_num) { 3130 dev_err(dev, "qp_num should not be 0\n"); 3131 ret = -EINVAL; 3132 goto err_unlock; 3133 } 3134 3135 ret = __hisi_qm_start(qm); 3136 if (!ret) 3137 atomic_set(&qm->status.flags, QM_START); 3138 3139 hisi_qm_set_state(qm, QM_READY); 3140 err_unlock: 3141 up_write(&qm->qps_lock); 3142 return ret; 3143 } 3144 EXPORT_SYMBOL_GPL(hisi_qm_start); 3145 3146 static int qm_restart(struct hisi_qm *qm) 3147 { 3148 struct device *dev = &qm->pdev->dev; 3149 struct hisi_qp *qp; 3150 int ret, i; 3151 3152 ret = hisi_qm_start(qm); 3153 if (ret < 0) 3154 return ret; 3155 3156 down_write(&qm->qps_lock); 3157 for (i = 0; i < qm->qp_num; i++) { 3158 qp = &qm->qp_array[i]; 3159 if (atomic_read(&qp->qp_status.flags) == QP_STOP && 3160 qp->is_resetting == true) { 3161 ret = qm_start_qp_nolock(qp, 0); 3162 if (ret < 0) { 3163 dev_err(dev, "Failed to start qp%d!\n", i); 3164 3165 up_write(&qm->qps_lock); 3166 return ret; 3167 } 3168 qp->is_resetting = false; 3169 } 3170 } 3171 up_write(&qm->qps_lock); 3172 3173 return 0; 3174 } 3175 3176 /* Stop started qps in reset flow */ 3177 static int qm_stop_started_qp(struct hisi_qm *qm) 3178 { 3179 struct device *dev = &qm->pdev->dev; 3180 struct hisi_qp *qp; 3181 int i, ret; 3182 3183 for (i = 0; i < qm->qp_num; i++) { 3184 qp = &qm->qp_array[i]; 3185 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { 3186 qp->is_resetting = true; 3187 ret = qm_stop_qp_nolock(qp); 3188 if (ret < 0) { 3189 dev_err(dev, "Failed to stop qp%d!\n", i); 3190 return ret; 3191 } 3192 } 3193 } 3194 3195 return 0; 3196 } 3197 3198 /** 3199 * qm_clear_queues() - Clear all queues memory in a qm. 3200 * @qm: The qm in which the queues will be cleared. 3201 * 3202 * This function clears all queues memory in a qm. Reset of accelerator can 3203 * use this to clear queues. 3204 */ 3205 static void qm_clear_queues(struct hisi_qm *qm) 3206 { 3207 struct hisi_qp *qp; 3208 int i; 3209 3210 for (i = 0; i < qm->qp_num; i++) { 3211 qp = &qm->qp_array[i]; 3212 if (qp->is_in_kernel && qp->is_resetting) 3213 memset(qp->qdma.va, 0, qp->qdma.size); 3214 } 3215 3216 memset(qm->qdma.va, 0, qm->qdma.size); 3217 } 3218 3219 /** 3220 * hisi_qm_stop() - Stop a qm. 3221 * @qm: The qm which will be stopped. 3222 * @r: The reason to stop qm. 3223 * 3224 * This function stops qm and its qps, then qm can not accept request. 3225 * Related resources are not released at this state, we can use hisi_qm_start 3226 * to let qm start again. 
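 *
 * A minimal sketch (illustrative only, error handling elided) of quiescing
 * the device and bringing it back, similar to what the soft reset flow does:
 *
 *	ret = hisi_qm_stop(qm, QM_SOFT_RESET);
 *	if (!ret)
 *		ret = hisi_qm_start(qm);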
3227 */ 3228 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) 3229 { 3230 struct device *dev = &qm->pdev->dev; 3231 int ret = 0; 3232 3233 down_write(&qm->qps_lock); 3234 3235 qm->status.stop_reason = r; 3236 if (!qm_avail_state(qm, QM_STOP)) { 3237 ret = -EPERM; 3238 goto err_unlock; 3239 } 3240 3241 if (qm->status.stop_reason == QM_SOFT_RESET || 3242 qm->status.stop_reason == QM_FLR) { 3243 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 3244 ret = qm_stop_started_qp(qm); 3245 if (ret < 0) { 3246 dev_err(dev, "Failed to stop started qp!\n"); 3247 goto err_unlock; 3248 } 3249 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 3250 } 3251 3252 qm_disable_eq_aeq_interrupts(qm); 3253 if (qm->fun_type == QM_HW_PF) { 3254 ret = hisi_qm_set_vft(qm, 0, 0, 0); 3255 if (ret < 0) { 3256 dev_err(dev, "Failed to set vft!\n"); 3257 ret = -EBUSY; 3258 goto err_unlock; 3259 } 3260 } 3261 3262 qm_clear_queues(qm); 3263 atomic_set(&qm->status.flags, QM_STOP); 3264 3265 err_unlock: 3266 up_write(&qm->qps_lock); 3267 return ret; 3268 } 3269 EXPORT_SYMBOL_GPL(hisi_qm_stop); 3270 3271 static void qm_hw_error_init(struct hisi_qm *qm) 3272 { 3273 if (!qm->ops->hw_error_init) { 3274 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); 3275 return; 3276 } 3277 3278 qm->ops->hw_error_init(qm); 3279 } 3280 3281 static void qm_hw_error_uninit(struct hisi_qm *qm) 3282 { 3283 if (!qm->ops->hw_error_uninit) { 3284 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); 3285 return; 3286 } 3287 3288 qm->ops->hw_error_uninit(qm); 3289 } 3290 3291 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) 3292 { 3293 if (!qm->ops->hw_error_handle) { 3294 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); 3295 return ACC_ERR_NONE; 3296 } 3297 3298 return qm->ops->hw_error_handle(qm); 3299 } 3300 3301 /** 3302 * hisi_qm_dev_err_init() - Initialize device error configuration. 3303 * @qm: The qm for which we want to do error initialization. 3304 * 3305 * Initialize QM and device error related configuration. 3306 */ 3307 void hisi_qm_dev_err_init(struct hisi_qm *qm) 3308 { 3309 if (qm->fun_type == QM_HW_VF) 3310 return; 3311 3312 qm_hw_error_init(qm); 3313 3314 if (!qm->err_ini->hw_err_enable) { 3315 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); 3316 return; 3317 } 3318 qm->err_ini->hw_err_enable(qm); 3319 } 3320 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); 3321 3322 /** 3323 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. 3324 * @qm: The qm for which we want to do error uninitialization. 3325 * 3326 * Uninitialize QM and device error related configuration. 3327 */ 3328 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) 3329 { 3330 if (qm->fun_type == QM_HW_VF) 3331 return; 3332 3333 qm_hw_error_uninit(qm); 3334 3335 if (!qm->err_ini->hw_err_disable) { 3336 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); 3337 return; 3338 } 3339 qm->err_ini->hw_err_disable(qm); 3340 } 3341 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); 3342 3343 /** 3344 * hisi_qm_free_qps() - free multiple queue pairs. 3345 * @qps: The queue pairs need to be freed. 3346 * @qp_num: The num of queue pairs. 
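 *
 * A pairing sketch with hisi_qm_alloc_qps_node() (illustrative only;
 * "qm_list", "qps" and the alg_type of 0 are placeholders):
 *
 *	ret = hisi_qm_alloc_qps_node(qm_list, 2, 0, numa_node_id(), qps);
 *	if (!ret) {
 *		... submit work through qps[0] and qps[1] ...
 *		hisi_qm_free_qps(qps, 2);
 *	}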
3347 */ 3348 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) 3349 { 3350 int i; 3351 3352 if (!qps || qp_num <= 0) 3353 return; 3354 3355 for (i = qp_num - 1; i >= 0; i--) 3356 hisi_qm_release_qp(qps[i]); 3357 } 3358 EXPORT_SYMBOL_GPL(hisi_qm_free_qps); 3359 3360 static void free_list(struct list_head *head) 3361 { 3362 struct hisi_qm_resource *res, *tmp; 3363 3364 list_for_each_entry_safe(res, tmp, head, list) { 3365 list_del(&res->list); 3366 kfree(res); 3367 } 3368 } 3369 3370 static int hisi_qm_sort_devices(int node, struct list_head *head, 3371 struct hisi_qm_list *qm_list) 3372 { 3373 struct hisi_qm_resource *res, *tmp; 3374 struct hisi_qm *qm; 3375 struct list_head *n; 3376 struct device *dev; 3377 int dev_node; 3378 3379 list_for_each_entry(qm, &qm_list->list, list) { 3380 dev = &qm->pdev->dev; 3381 3382 dev_node = dev_to_node(dev); 3383 if (dev_node < 0) 3384 dev_node = 0; 3385 3386 res = kzalloc(sizeof(*res), GFP_KERNEL); 3387 if (!res) 3388 return -ENOMEM; 3389 3390 res->qm = qm; 3391 res->distance = node_distance(dev_node, node); 3392 n = head; 3393 list_for_each_entry(tmp, head, list) { 3394 if (res->distance < tmp->distance) { 3395 n = &tmp->list; 3396 break; 3397 } 3398 } 3399 list_add_tail(&res->list, n); 3400 } 3401 3402 return 0; 3403 } 3404 3405 /** 3406 * hisi_qm_alloc_qps_node() - Create multiple queue pairs. 3407 * @qm_list: The list of all available devices. 3408 * @qp_num: The number of queue pairs need created. 3409 * @alg_type: The algorithm type. 3410 * @node: The numa node. 3411 * @qps: The queue pairs need created. 3412 * 3413 * This function will sort all available device according to numa distance. 3414 * Then try to create all queue pairs from one device, if all devices do 3415 * not meet the requirements will return error. 3416 */ 3417 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, 3418 u8 alg_type, int node, struct hisi_qp **qps) 3419 { 3420 struct hisi_qm_resource *tmp; 3421 int ret = -ENODEV; 3422 LIST_HEAD(head); 3423 int i; 3424 3425 if (!qps || !qm_list || qp_num <= 0) 3426 return -EINVAL; 3427 3428 mutex_lock(&qm_list->lock); 3429 if (hisi_qm_sort_devices(node, &head, qm_list)) { 3430 mutex_unlock(&qm_list->lock); 3431 goto err; 3432 } 3433 3434 list_for_each_entry(tmp, &head, list) { 3435 for (i = 0; i < qp_num; i++) { 3436 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); 3437 if (IS_ERR(qps[i])) { 3438 hisi_qm_free_qps(qps, i); 3439 break; 3440 } 3441 } 3442 3443 if (i == qp_num) { 3444 ret = 0; 3445 break; 3446 } 3447 } 3448 3449 mutex_unlock(&qm_list->lock); 3450 if (ret) 3451 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", 3452 node, alg_type, qp_num); 3453 3454 err: 3455 free_list(&head); 3456 return ret; 3457 } 3458 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); 3459 3460 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) 3461 { 3462 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; 3463 u32 max_qp_num = qm->max_qp_num; 3464 u32 q_base = qm->qp_num; 3465 int ret; 3466 3467 if (!num_vfs) 3468 return -EINVAL; 3469 3470 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; 3471 3472 /* If vfs_q_num is less than num_vfs, return error. */ 3473 if (vfs_q_num < num_vfs) 3474 return -EINVAL; 3475 3476 q_num = vfs_q_num / num_vfs; 3477 remain_q_num = vfs_q_num % num_vfs; 3478 3479 for (i = num_vfs; i > 0; i--) { 3480 /* 3481 * if q_num + remain_q_num > max_qp_num in last vf, divide the 3482 * remaining queues equally. 
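 * For example, 14 queues left for 4 VFs gives q_num = 3 and
 * remain_q_num = 2: if 3 + 2 fits within max_qp_num, the first VF handled
 * (i == num_vfs) takes 5 queues and the others take 3 each; otherwise the
 * 2 leftover queues are handed out one per VF as q_num + 1.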
3483 */ 3484 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { 3485 act_q_num = q_num + remain_q_num; 3486 remain_q_num = 0; 3487 } else if (remain_q_num > 0) { 3488 act_q_num = q_num + 1; 3489 remain_q_num--; 3490 } else { 3491 act_q_num = q_num; 3492 } 3493 3494 act_q_num = min(act_q_num, max_qp_num); 3495 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); 3496 if (ret) { 3497 for (j = num_vfs; j > i; j--) 3498 hisi_qm_set_vft(qm, j, 0, 0); 3499 return ret; 3500 } 3501 q_base += act_q_num; 3502 } 3503 3504 return 0; 3505 } 3506 3507 static int qm_clear_vft_config(struct hisi_qm *qm) 3508 { 3509 int ret; 3510 u32 i; 3511 3512 for (i = 1; i <= qm->vfs_num; i++) { 3513 ret = hisi_qm_set_vft(qm, i, 0, 0); 3514 if (ret) 3515 return ret; 3516 } 3517 qm->vfs_num = 0; 3518 3519 return 0; 3520 } 3521 3522 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) 3523 { 3524 struct device *dev = &qm->pdev->dev; 3525 u32 ir = qos * QM_QOS_RATE; 3526 int ret, total_vfs, i; 3527 3528 total_vfs = pci_sriov_get_totalvfs(qm->pdev); 3529 if (fun_index > total_vfs) 3530 return -EINVAL; 3531 3532 qm->factor[fun_index].func_qos = qos; 3533 3534 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); 3535 if (ret) { 3536 dev_err(dev, "failed to calculate shaper parameter!\n"); 3537 return -EINVAL; 3538 } 3539 3540 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 3541 /* The base number of queue reuse for different alg type */ 3542 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); 3543 if (ret) { 3544 dev_err(dev, "type: %d, failed to set shaper vft!\n", i); 3545 return -EINVAL; 3546 } 3547 } 3548 3549 return 0; 3550 } 3551 3552 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) 3553 { 3554 u64 cir_u = 0, cir_b = 0, cir_s = 0; 3555 u64 shaper_vft, ir_calc, ir; 3556 unsigned int val; 3557 u32 error_rate; 3558 int ret; 3559 3560 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3561 val & BIT(0), POLL_PERIOD, 3562 POLL_TIMEOUT); 3563 if (ret) 3564 return 0; 3565 3566 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); 3567 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); 3568 writel(fun_index, qm->io_base + QM_VFT_CFG); 3569 3570 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 3571 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 3572 3573 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3574 val & BIT(0), POLL_PERIOD, 3575 POLL_TIMEOUT); 3576 if (ret) 3577 return 0; 3578 3579 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | 3580 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); 3581 3582 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; 3583 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; 3584 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; 3585 3586 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; 3587 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; 3588 3589 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 3590 3591 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; 3592 3593 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 3594 if (error_rate > QM_QOS_MIN_ERROR_RATE) { 3595 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); 3596 return 0; 3597 } 3598 3599 return ir; 3600 } 3601 3602 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) 3603 { 3604 struct device *dev = &qm->pdev->dev; 3605 u64 mb_cmd; 3606 u32 qos; 3607 int ret; 3608 3609 qos = qm_get_shaper_vft_qos(qm, fun_num); 3610 if (!qos) { 3611 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); 3612 return; 3613 } 3614 3615 
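	/*
	 * Pack the reply: the QM_PF_SET_QOS command sits in the low 32 bits
	 * of the mailbox message and the qos value is shifted into the high
	 * 32 bits via QM_MB_CMD_DATA_SHIFT, where the VF side reads it back
	 * (see qm_vf_read_qos()).
	 */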
mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; 3616 ret = qm_ping_single_vf(qm, mb_cmd, fun_num); 3617 if (ret) 3618 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); 3619 } 3620 3621 static int qm_vf_read_qos(struct hisi_qm *qm) 3622 { 3623 int cnt = 0; 3624 int ret = -EINVAL; 3625 3626 /* reset mailbox qos val */ 3627 qm->mb_qos = 0; 3628 3629 /* vf ping pf to get function qos */ 3630 ret = qm_ping_pf(qm, QM_VF_GET_QOS); 3631 if (ret) { 3632 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 3633 return ret; 3634 } 3635 3636 while (true) { 3637 msleep(QM_WAIT_DST_ACK); 3638 if (qm->mb_qos) 3639 break; 3640 3641 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 3642 pci_err(qm->pdev, "PF ping VF timeout!\n"); 3643 return -ETIMEDOUT; 3644 } 3645 } 3646 3647 return ret; 3648 } 3649 3650 static ssize_t qm_algqos_read(struct file *filp, char __user *buf, 3651 size_t count, loff_t *pos) 3652 { 3653 struct hisi_qm *qm = filp->private_data; 3654 char tbuf[QM_DBG_READ_LEN]; 3655 u32 qos_val, ir; 3656 int ret; 3657 3658 ret = hisi_qm_get_dfx_access(qm); 3659 if (ret) 3660 return ret; 3661 3662 /* Mailbox and reset cannot be operated at the same time */ 3663 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 3664 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); 3665 ret = -EAGAIN; 3666 goto err_put_dfx_access; 3667 } 3668 3669 if (qm->fun_type == QM_HW_PF) { 3670 ir = qm_get_shaper_vft_qos(qm, 0); 3671 } else { 3672 ret = qm_vf_read_qos(qm); 3673 if (ret) 3674 goto err_get_status; 3675 ir = qm->mb_qos; 3676 } 3677 3678 qos_val = ir / QM_QOS_RATE; 3679 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); 3680 3681 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); 3682 3683 err_get_status: 3684 clear_bit(QM_RESETTING, &qm->misc_ctl); 3685 err_put_dfx_access: 3686 hisi_qm_put_dfx_access(qm); 3687 return ret; 3688 } 3689 3690 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, 3691 unsigned long *val, 3692 unsigned int *fun_index) 3693 { 3694 struct bus_type *bus_type = qm->pdev->dev.bus; 3695 char tbuf_bdf[QM_DBG_READ_LEN] = {0}; 3696 char val_buf[QM_DBG_READ_LEN] = {0}; 3697 struct pci_dev *pdev; 3698 struct device *dev; 3699 int ret; 3700 3701 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); 3702 if (ret != QM_QOS_PARAM_NUM) 3703 return -EINVAL; 3704 3705 ret = kstrtoul(val_buf, 10, val); 3706 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { 3707 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); 3708 return -EINVAL; 3709 } 3710 3711 dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); 3712 if (!dev) { 3713 pci_err(qm->pdev, "input pci bdf number is error!\n"); 3714 return -ENODEV; 3715 } 3716 3717 pdev = container_of(dev, struct pci_dev, dev); 3718 3719 *fun_index = pdev->devfn; 3720 3721 return 0; 3722 } 3723 3724 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, 3725 size_t count, loff_t *pos) 3726 { 3727 struct hisi_qm *qm = filp->private_data; 3728 char tbuf[QM_DBG_READ_LEN]; 3729 unsigned int fun_index; 3730 unsigned long val; 3731 int len, ret; 3732 3733 if (*pos != 0) 3734 return 0; 3735 3736 if (count >= QM_DBG_READ_LEN) 3737 return -ENOSPC; 3738 3739 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); 3740 if (len < 0) 3741 return len; 3742 3743 tbuf[len] = '\0'; 3744 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); 3745 if (ret) 3746 return ret; 3747 3748 /* Mailbox and reset cannot be operated at the same time */ 3749 if (test_and_set_bit(QM_RESETTING, 
&qm->misc_ctl)) { 3750 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); 3751 return -EAGAIN; 3752 } 3753 3754 ret = qm_pm_get_sync(qm); 3755 if (ret) { 3756 ret = -EINVAL; 3757 goto err_get_status; 3758 } 3759 3760 ret = qm_func_shaper_enable(qm, fun_index, val); 3761 if (ret) { 3762 pci_err(qm->pdev, "failed to enable function shaper!\n"); 3763 ret = -EINVAL; 3764 goto err_put_sync; 3765 } 3766 3767 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", 3768 fun_index, val); 3769 ret = count; 3770 3771 err_put_sync: 3772 qm_pm_put_sync(qm); 3773 err_get_status: 3774 clear_bit(QM_RESETTING, &qm->misc_ctl); 3775 return ret; 3776 } 3777 3778 static const struct file_operations qm_algqos_fops = { 3779 .owner = THIS_MODULE, 3780 .open = simple_open, 3781 .read = qm_algqos_read, 3782 .write = qm_algqos_write, 3783 }; 3784 3785 /** 3786 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. 3787 * @qm: The qm for which we want to add debugfs files. 3788 * 3789 * Create function qos debugfs files, VF ping PF to get function qos. 3790 */ 3791 void hisi_qm_set_algqos_init(struct hisi_qm *qm) 3792 { 3793 if (qm->fun_type == QM_HW_PF) 3794 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, 3795 qm, &qm_algqos_fops); 3796 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3797 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, 3798 qm, &qm_algqos_fops); 3799 } 3800 3801 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) 3802 { 3803 int i; 3804 3805 for (i = 1; i <= total_func; i++) 3806 qm->factor[i].func_qos = QM_QOS_MAX_VAL; 3807 } 3808 3809 /** 3810 * hisi_qm_sriov_enable() - enable virtual functions 3811 * @pdev: the PCIe device 3812 * @max_vfs: the number of virtual functions to enable 3813 * 3814 * Returns the number of enabled VFs. If there are VFs enabled already or 3815 * max_vfs is more than the total number of device can be enabled, returns 3816 * failure. 3817 */ 3818 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) 3819 { 3820 struct hisi_qm *qm = pci_get_drvdata(pdev); 3821 int pre_existing_vfs, num_vfs, total_vfs, ret; 3822 3823 ret = qm_pm_get_sync(qm); 3824 if (ret) 3825 return ret; 3826 3827 total_vfs = pci_sriov_get_totalvfs(pdev); 3828 pre_existing_vfs = pci_num_vf(pdev); 3829 if (pre_existing_vfs) { 3830 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", 3831 pre_existing_vfs); 3832 goto err_put_sync; 3833 } 3834 3835 if (max_vfs > total_vfs) { 3836 pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs); 3837 ret = -ERANGE; 3838 goto err_put_sync; 3839 } 3840 3841 num_vfs = max_vfs; 3842 3843 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 3844 hisi_qm_init_vf_qos(qm, num_vfs); 3845 3846 ret = qm_vf_q_assign(qm, num_vfs); 3847 if (ret) { 3848 pci_err(pdev, "Can't assign queues for VF!\n"); 3849 goto err_put_sync; 3850 } 3851 3852 qm->vfs_num = num_vfs; 3853 3854 ret = pci_enable_sriov(pdev, num_vfs); 3855 if (ret) { 3856 pci_err(pdev, "Can't enable VF!\n"); 3857 qm_clear_vft_config(qm); 3858 goto err_put_sync; 3859 } 3860 3861 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); 3862 3863 return num_vfs; 3864 3865 err_put_sync: 3866 qm_pm_put_sync(qm); 3867 return ret; 3868 } 3869 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); 3870 3871 /** 3872 * hisi_qm_sriov_disable - disable virtual functions 3873 * @pdev: the PCI device. 3874 * @is_frozen: true when all the VFs are frozen. 3875 * 3876 * Return failure if there are VFs assigned already or VF is in used. 
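 *
 * @is_frozen is true when the caller has already frozen the VFs (typically
 * the driver remove path), in which case the qm_try_frozen_vfs() check is
 * skipped; hisi_qm_sriov_configure() passes false.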
3877 */ 3878 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) 3879 { 3880 struct hisi_qm *qm = pci_get_drvdata(pdev); 3881 int ret; 3882 3883 if (pci_vfs_assigned(pdev)) { 3884 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); 3885 return -EPERM; 3886 } 3887 3888 /* While VF is in used, SRIOV cannot be disabled. */ 3889 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { 3890 pci_err(pdev, "Task is using its VF!\n"); 3891 return -EBUSY; 3892 } 3893 3894 pci_disable_sriov(pdev); 3895 3896 ret = qm_clear_vft_config(qm); 3897 if (ret) 3898 return ret; 3899 3900 qm_pm_put_sync(qm); 3901 3902 return 0; 3903 } 3904 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); 3905 3906 /** 3907 * hisi_qm_sriov_configure - configure the number of VFs 3908 * @pdev: The PCI device 3909 * @num_vfs: The number of VFs need enabled 3910 * 3911 * Enable SR-IOV according to num_vfs, 0 means disable. 3912 */ 3913 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) 3914 { 3915 if (num_vfs == 0) 3916 return hisi_qm_sriov_disable(pdev, false); 3917 else 3918 return hisi_qm_sriov_enable(pdev, num_vfs); 3919 } 3920 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); 3921 3922 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) 3923 { 3924 u32 err_sts; 3925 3926 if (!qm->err_ini->get_dev_hw_err_status) { 3927 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); 3928 return ACC_ERR_NONE; 3929 } 3930 3931 /* get device hardware error status */ 3932 err_sts = qm->err_ini->get_dev_hw_err_status(qm); 3933 if (err_sts) { 3934 if (err_sts & qm->err_info.ecc_2bits_mask) 3935 qm->err_status.is_dev_ecc_mbit = true; 3936 3937 if (qm->err_ini->log_dev_hw_err) 3938 qm->err_ini->log_dev_hw_err(qm, err_sts); 3939 3940 if (err_sts & qm->err_info.dev_reset_mask) 3941 return ACC_ERR_NEED_RESET; 3942 3943 if (qm->err_ini->clear_dev_hw_err_status) 3944 qm->err_ini->clear_dev_hw_err_status(qm, err_sts); 3945 } 3946 3947 return ACC_ERR_RECOVERED; 3948 } 3949 3950 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) 3951 { 3952 enum acc_err_result qm_ret, dev_ret; 3953 3954 /* log qm error */ 3955 qm_ret = qm_hw_error_handle(qm); 3956 3957 /* log device error */ 3958 dev_ret = qm_dev_err_handle(qm); 3959 3960 return (qm_ret == ACC_ERR_NEED_RESET || 3961 dev_ret == ACC_ERR_NEED_RESET) ? 3962 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; 3963 } 3964 3965 /** 3966 * hisi_qm_dev_err_detected() - Get device and qm error status then log it. 3967 * @pdev: The PCI device which need report error. 3968 * @state: The connectivity between CPU and device. 3969 * 3970 * We register this function into PCIe AER handlers, It will report device or 3971 * qm hardware error status when error occur. 
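 *
 * Returns PCI_ERS_RESULT_NEED_RESET when either the QM or the device error
 * handler reports ACC_ERR_NEED_RESET, PCI_ERS_RESULT_DISCONNECT on permanent
 * failure, and PCI_ERS_RESULT_RECOVERED otherwise; VFs return
 * PCI_ERS_RESULT_NONE because the PF handles all hardware errors.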
3972 */ 3973 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, 3974 pci_channel_state_t state) 3975 { 3976 struct hisi_qm *qm = pci_get_drvdata(pdev); 3977 enum acc_err_result ret; 3978 3979 if (pdev->is_virtfn) 3980 return PCI_ERS_RESULT_NONE; 3981 3982 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state); 3983 if (state == pci_channel_io_perm_failure) 3984 return PCI_ERS_RESULT_DISCONNECT; 3985 3986 ret = qm_process_dev_error(qm); 3987 if (ret == ACC_ERR_NEED_RESET) 3988 return PCI_ERS_RESULT_NEED_RESET; 3989 3990 return PCI_ERS_RESULT_RECOVERED; 3991 } 3992 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); 3993 3994 static int qm_check_req_recv(struct hisi_qm *qm) 3995 { 3996 struct pci_dev *pdev = qm->pdev; 3997 int ret; 3998 u32 val; 3999 4000 if (qm->ver >= QM_HW_V3) 4001 return 0; 4002 4003 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); 4004 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 4005 (val == ACC_VENDOR_ID_VALUE), 4006 POLL_PERIOD, POLL_TIMEOUT); 4007 if (ret) { 4008 dev_err(&pdev->dev, "Fails to read QM reg!\n"); 4009 return ret; 4010 } 4011 4012 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); 4013 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 4014 (val == PCI_VENDOR_ID_HUAWEI), 4015 POLL_PERIOD, POLL_TIMEOUT); 4016 if (ret) 4017 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); 4018 4019 return ret; 4020 } 4021 4022 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) 4023 { 4024 struct pci_dev *pdev = qm->pdev; 4025 u16 cmd; 4026 int i; 4027 4028 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 4029 if (set) 4030 cmd |= PCI_COMMAND_MEMORY; 4031 else 4032 cmd &= ~PCI_COMMAND_MEMORY; 4033 4034 pci_write_config_word(pdev, PCI_COMMAND, cmd); 4035 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4036 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 4037 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) 4038 return 0; 4039 4040 udelay(1); 4041 } 4042 4043 return -ETIMEDOUT; 4044 } 4045 4046 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) 4047 { 4048 struct pci_dev *pdev = qm->pdev; 4049 u16 sriov_ctrl; 4050 int pos; 4051 int i; 4052 4053 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4054 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4055 if (set) 4056 sriov_ctrl |= PCI_SRIOV_CTRL_MSE; 4057 else 4058 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; 4059 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); 4060 4061 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4062 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4063 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> 4064 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) 4065 return 0; 4066 4067 udelay(1); 4068 } 4069 4070 return -ETIMEDOUT; 4071 } 4072 4073 static int qm_vf_reset_prepare(struct hisi_qm *qm, 4074 enum qm_stop_reason stop_reason) 4075 { 4076 struct hisi_qm_list *qm_list = qm->qm_list; 4077 struct pci_dev *pdev = qm->pdev; 4078 struct pci_dev *virtfn; 4079 struct hisi_qm *vf_qm; 4080 int ret = 0; 4081 4082 mutex_lock(&qm_list->lock); 4083 list_for_each_entry(vf_qm, &qm_list->list, list) { 4084 virtfn = vf_qm->pdev; 4085 if (virtfn == pdev) 4086 continue; 4087 4088 if (pci_physfn(virtfn) == pdev) { 4089 /* save VFs PCIE BAR configuration */ 4090 pci_save_state(virtfn); 4091 4092 ret = hisi_qm_stop(vf_qm, stop_reason); 4093 if (ret) 4094 goto stop_fail; 4095 } 4096 } 4097 4098 stop_fail: 4099 mutex_unlock(&qm_list->lock); 4100 return ret; 4101 } 4102 4103 static int qm_try_stop_vfs(struct hisi_qm *qm, 
u64 cmd, 4104 enum qm_stop_reason stop_reason) 4105 { 4106 struct pci_dev *pdev = qm->pdev; 4107 int ret; 4108 4109 if (!qm->vfs_num) 4110 return 0; 4111 4112 /* Kunpeng930 supports to notify VFs to stop before PF reset */ 4113 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 4114 ret = qm_ping_all_vfs(qm, cmd); 4115 if (ret) 4116 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); 4117 } else { 4118 ret = qm_vf_reset_prepare(qm, stop_reason); 4119 if (ret) 4120 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret); 4121 } 4122 4123 return ret; 4124 } 4125 4126 static int qm_controller_reset_prepare(struct hisi_qm *qm) 4127 { 4128 struct pci_dev *pdev = qm->pdev; 4129 int ret; 4130 4131 ret = qm_reset_prepare_ready(qm); 4132 if (ret) { 4133 pci_err(pdev, "Controller reset not ready!\n"); 4134 return ret; 4135 } 4136 4137 /* PF obtains the information of VF by querying the register. */ 4138 qm_cmd_uninit(qm); 4139 4140 /* Whether VFs stop successfully, soft reset will continue. */ 4141 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); 4142 if (ret) 4143 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); 4144 4145 ret = hisi_qm_stop(qm, QM_SOFT_RESET); 4146 if (ret) { 4147 pci_err(pdev, "Fails to stop QM!\n"); 4148 qm_reset_bit_clear(qm); 4149 return ret; 4150 } 4151 4152 if (qm->use_sva) { 4153 ret = qm_hw_err_isolate(qm); 4154 if (ret) 4155 pci_err(pdev, "failed to isolate hw err!\n"); 4156 } 4157 4158 ret = qm_wait_vf_prepare_finish(qm); 4159 if (ret) 4160 pci_err(pdev, "failed to stop by vfs in soft reset!\n"); 4161 4162 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4163 4164 return 0; 4165 } 4166 4167 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) 4168 { 4169 u32 nfe_enb = 0; 4170 4171 /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ 4172 if (qm->ver >= QM_HW_V3) 4173 return; 4174 4175 if (!qm->err_status.is_dev_ecc_mbit && 4176 qm->err_status.is_qm_ecc_mbit && 4177 qm->err_ini->close_axi_master_ooo) { 4178 qm->err_ini->close_axi_master_ooo(qm); 4179 } else if (qm->err_status.is_dev_ecc_mbit && 4180 !qm->err_status.is_qm_ecc_mbit && 4181 !qm->err_ini->close_axi_master_ooo) { 4182 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); 4183 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, 4184 qm->io_base + QM_RAS_NFE_ENABLE); 4185 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); 4186 } 4187 } 4188 4189 static int qm_soft_reset(struct hisi_qm *qm) 4190 { 4191 struct pci_dev *pdev = qm->pdev; 4192 int ret; 4193 u32 val; 4194 4195 /* Ensure all doorbells and mailboxes received by QM */ 4196 ret = qm_check_req_recv(qm); 4197 if (ret) 4198 return ret; 4199 4200 if (qm->vfs_num) { 4201 ret = qm_set_vf_mse(qm, false); 4202 if (ret) { 4203 pci_err(pdev, "Fails to disable vf MSE bit.\n"); 4204 return ret; 4205 } 4206 } 4207 4208 ret = qm->ops->set_msi(qm, false); 4209 if (ret) { 4210 pci_err(pdev, "Fails to disable PEH MSI bit.\n"); 4211 return ret; 4212 } 4213 4214 qm_dev_ecc_mbit_handle(qm); 4215 4216 /* OOO register set and check */ 4217 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, 4218 qm->io_base + ACC_MASTER_GLOBAL_CTRL); 4219 4220 /* If bus lock, reset chip */ 4221 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, 4222 val, 4223 (val == ACC_MASTER_TRANS_RETURN_RW), 4224 POLL_PERIOD, POLL_TIMEOUT); 4225 if (ret) { 4226 pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); 4227 return ret; 4228 } 4229 4230 if (qm->err_ini->close_sva_prefetch) 4231 qm->err_ini->close_sva_prefetch(qm); 4232 4233 ret = qm_set_pf_mse(qm, false); 4234 if (ret) { 4235 pci_err(pdev, "Fails to disable pf MSE bit.\n"); 4236 return ret; 4237 } 4238 4239 /* The reset related sub-control registers are not in PCI BAR */ 4240 if (ACPI_HANDLE(&pdev->dev)) { 4241 unsigned long long value = 0; 4242 acpi_status s; 4243 4244 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), 4245 qm->err_info.acpi_rst, 4246 NULL, &value); 4247 if (ACPI_FAILURE(s)) { 4248 pci_err(pdev, "NO controller reset method!\n"); 4249 return -EIO; 4250 } 4251 4252 if (value) { 4253 pci_err(pdev, "Reset step %llu failed!\n", value); 4254 return -EIO; 4255 } 4256 } else { 4257 pci_err(pdev, "No reset method!\n"); 4258 return -EINVAL; 4259 } 4260 4261 return 0; 4262 } 4263 4264 static int qm_vf_reset_done(struct hisi_qm *qm) 4265 { 4266 struct hisi_qm_list *qm_list = qm->qm_list; 4267 struct pci_dev *pdev = qm->pdev; 4268 struct pci_dev *virtfn; 4269 struct hisi_qm *vf_qm; 4270 int ret = 0; 4271 4272 mutex_lock(&qm_list->lock); 4273 list_for_each_entry(vf_qm, &qm_list->list, list) { 4274 virtfn = vf_qm->pdev; 4275 if (virtfn == pdev) 4276 continue; 4277 4278 if (pci_physfn(virtfn) == pdev) { 4279 /* enable VFs PCIE BAR configuration */ 4280 pci_restore_state(virtfn); 4281 4282 ret = qm_restart(vf_qm); 4283 if (ret) 4284 goto restart_fail; 4285 } 4286 } 4287 4288 restart_fail: 4289 mutex_unlock(&qm_list->lock); 4290 return ret; 4291 } 4292 4293 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) 4294 { 4295 struct pci_dev *pdev = qm->pdev; 4296 int ret; 4297 4298 if (!qm->vfs_num) 4299 return 0; 4300 4301 ret = qm_vf_q_assign(qm, qm->vfs_num); 4302 if (ret) { 4303 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret); 4304 return ret; 4305 } 4306 4307 /* Kunpeng930 supports to notify VFs to start after PF reset. 
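 * Without that mailbox support, the PF instead restores and restarts each
 * VF itself via qm_vf_reset_done() below.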
*/ 4308 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 4309 ret = qm_ping_all_vfs(qm, cmd); 4310 if (ret) 4311 pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); 4312 } else { 4313 ret = qm_vf_reset_done(qm); 4314 if (ret) 4315 pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret); 4316 } 4317 4318 return ret; 4319 } 4320 4321 static int qm_dev_hw_init(struct hisi_qm *qm) 4322 { 4323 return qm->err_ini->hw_init(qm); 4324 } 4325 4326 static void qm_restart_prepare(struct hisi_qm *qm) 4327 { 4328 u32 value; 4329 4330 if (qm->err_ini->open_sva_prefetch) 4331 qm->err_ini->open_sva_prefetch(qm); 4332 4333 if (qm->ver >= QM_HW_V3) 4334 return; 4335 4336 if (!qm->err_status.is_qm_ecc_mbit && 4337 !qm->err_status.is_dev_ecc_mbit) 4338 return; 4339 4340 /* temporarily close the OOO port used for PEH to write out MSI */ 4341 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4342 writel(value & ~qm->err_info.msi_wr_port, 4343 qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4344 4345 /* clear dev ecc 2bit error source if having */ 4346 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; 4347 if (value && qm->err_ini->clear_dev_hw_err_status) 4348 qm->err_ini->clear_dev_hw_err_status(qm, value); 4349 4350 /* clear QM ecc mbit error source */ 4351 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); 4352 4353 /* clear AM Reorder Buffer ecc mbit source */ 4354 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); 4355 } 4356 4357 static void qm_restart_done(struct hisi_qm *qm) 4358 { 4359 u32 value; 4360 4361 if (qm->ver >= QM_HW_V3) 4362 goto clear_flags; 4363 4364 if (!qm->err_status.is_qm_ecc_mbit && 4365 !qm->err_status.is_dev_ecc_mbit) 4366 return; 4367 4368 /* open the OOO port for PEH to write out MSI */ 4369 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4370 value |= qm->err_info.msi_wr_port; 4371 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4372 4373 clear_flags: 4374 qm->err_status.is_qm_ecc_mbit = false; 4375 qm->err_status.is_dev_ecc_mbit = false; 4376 } 4377 4378 static int qm_controller_reset_done(struct hisi_qm *qm) 4379 { 4380 struct pci_dev *pdev = qm->pdev; 4381 int ret; 4382 4383 ret = qm->ops->set_msi(qm, true); 4384 if (ret) { 4385 pci_err(pdev, "Fails to enable PEH MSI bit!\n"); 4386 return ret; 4387 } 4388 4389 ret = qm_set_pf_mse(qm, true); 4390 if (ret) { 4391 pci_err(pdev, "Fails to enable pf MSE bit!\n"); 4392 return ret; 4393 } 4394 4395 if (qm->vfs_num) { 4396 ret = qm_set_vf_mse(qm, true); 4397 if (ret) { 4398 pci_err(pdev, "Fails to enable vf MSE bit!\n"); 4399 return ret; 4400 } 4401 } 4402 4403 ret = qm_dev_hw_init(qm); 4404 if (ret) { 4405 pci_err(pdev, "Failed to init device\n"); 4406 return ret; 4407 } 4408 4409 qm_restart_prepare(qm); 4410 hisi_qm_dev_err_init(qm); 4411 if (qm->err_ini->open_axi_master_ooo) 4412 qm->err_ini->open_axi_master_ooo(qm); 4413 4414 ret = qm_dev_mem_reset(qm); 4415 if (ret) { 4416 pci_err(pdev, "failed to reset device memory\n"); 4417 return ret; 4418 } 4419 4420 ret = qm_restart(qm); 4421 if (ret) { 4422 pci_err(pdev, "Failed to start QM!\n"); 4423 return ret; 4424 } 4425 4426 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4427 if (ret) 4428 pci_err(pdev, "failed to start vfs by pf in soft reset.\n"); 4429 4430 ret = qm_wait_vf_prepare_finish(qm); 4431 if (ret) 4432 pci_err(pdev, "failed to start by vfs in soft reset!\n"); 4433 4434 qm_cmd_init(qm); 4435 qm_restart_done(qm); 4436 4437 qm_reset_bit_clear(qm); 4438 4439 return 0; 4440 } 4441 4442 static int qm_controller_reset(struct 
hisi_qm *qm) 4443 { 4444 struct pci_dev *pdev = qm->pdev; 4445 int ret; 4446 4447 pci_info(pdev, "Controller resetting...\n"); 4448 4449 ret = qm_controller_reset_prepare(qm); 4450 if (ret) { 4451 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4452 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4453 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4454 return ret; 4455 } 4456 4457 hisi_qm_show_last_dfx_regs(qm); 4458 if (qm->err_ini->show_last_dfx_regs) 4459 qm->err_ini->show_last_dfx_regs(qm); 4460 4461 ret = qm_soft_reset(qm); 4462 if (ret) 4463 goto err_reset; 4464 4465 ret = qm_controller_reset_done(qm); 4466 if (ret) 4467 goto err_reset; 4468 4469 pci_info(pdev, "Controller reset complete\n"); 4470 4471 return 0; 4472 4473 err_reset: 4474 pci_err(pdev, "Controller reset failed (%d)\n", ret); 4475 qm_reset_bit_clear(qm); 4476 4477 /* if resetting fails, isolate the device */ 4478 if (qm->use_sva) 4479 qm->isolate_data.is_isolate = true; 4480 return ret; 4481 } 4482 4483 /** 4484 * hisi_qm_dev_slot_reset() - slot reset 4485 * @pdev: the PCIe device 4486 * 4487 * This function offers QM relate PCIe device reset interface. Drivers which 4488 * use QM can use this function as slot_reset in its struct pci_error_handlers. 4489 */ 4490 pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) 4491 { 4492 struct hisi_qm *qm = pci_get_drvdata(pdev); 4493 int ret; 4494 4495 if (pdev->is_virtfn) 4496 return PCI_ERS_RESULT_RECOVERED; 4497 4498 /* reset pcie device controller */ 4499 ret = qm_controller_reset(qm); 4500 if (ret) { 4501 pci_err(pdev, "Controller reset failed (%d)\n", ret); 4502 return PCI_ERS_RESULT_DISCONNECT; 4503 } 4504 4505 return PCI_ERS_RESULT_RECOVERED; 4506 } 4507 EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); 4508 4509 void hisi_qm_reset_prepare(struct pci_dev *pdev) 4510 { 4511 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 4512 struct hisi_qm *qm = pci_get_drvdata(pdev); 4513 u32 delay = 0; 4514 int ret; 4515 4516 hisi_qm_dev_err_uninit(pf_qm); 4517 4518 /* 4519 * Check whether there is an ECC mbit error, If it occurs, need to 4520 * wait for soft reset to fix it. 4521 */ 4522 while (qm_check_dev_error(pf_qm)) { 4523 msleep(++delay); 4524 if (delay > QM_RESET_WAIT_TIMEOUT) 4525 return; 4526 } 4527 4528 ret = qm_reset_prepare_ready(qm); 4529 if (ret) { 4530 pci_err(pdev, "FLR not ready!\n"); 4531 return; 4532 } 4533 4534 /* PF obtains the information of VF by querying the register. 
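 * The mailbox command channel is therefore quiesced here via qm_cmd_uninit()
 * and re-enabled from hisi_qm_reset_done().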
*/ 4535 if (qm->fun_type == QM_HW_PF) 4536 qm_cmd_uninit(qm); 4537 4538 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR); 4539 if (ret) 4540 pci_err(pdev, "failed to stop vfs by pf in FLR.\n"); 4541 4542 ret = hisi_qm_stop(qm, QM_FLR); 4543 if (ret) { 4544 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); 4545 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4546 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4547 return; 4548 } 4549 4550 ret = qm_wait_vf_prepare_finish(qm); 4551 if (ret) 4552 pci_err(pdev, "failed to stop by vfs in FLR!\n"); 4553 4554 pci_info(pdev, "FLR resetting...\n"); 4555 } 4556 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); 4557 4558 static bool qm_flr_reset_complete(struct pci_dev *pdev) 4559 { 4560 struct pci_dev *pf_pdev = pci_physfn(pdev); 4561 struct hisi_qm *qm = pci_get_drvdata(pf_pdev); 4562 u32 id; 4563 4564 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); 4565 if (id == QM_PCI_COMMAND_INVALID) { 4566 pci_err(pdev, "Device can not be used!\n"); 4567 return false; 4568 } 4569 4570 return true; 4571 } 4572 4573 void hisi_qm_reset_done(struct pci_dev *pdev) 4574 { 4575 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 4576 struct hisi_qm *qm = pci_get_drvdata(pdev); 4577 int ret; 4578 4579 if (qm->fun_type == QM_HW_PF) { 4580 ret = qm_dev_hw_init(qm); 4581 if (ret) { 4582 pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); 4583 goto flr_done; 4584 } 4585 } 4586 4587 hisi_qm_dev_err_init(pf_qm); 4588 4589 ret = qm_restart(qm); 4590 if (ret) { 4591 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); 4592 goto flr_done; 4593 } 4594 4595 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4596 if (ret) 4597 pci_err(pdev, "failed to start vfs by pf in FLR.\n"); 4598 4599 ret = qm_wait_vf_prepare_finish(qm); 4600 if (ret) 4601 pci_err(pdev, "failed to start by vfs in FLR!\n"); 4602 4603 flr_done: 4604 if (qm->fun_type == QM_HW_PF) 4605 qm_cmd_init(qm); 4606 4607 if (qm_flr_reset_complete(pdev)) 4608 pci_info(pdev, "FLR reset complete\n"); 4609 4610 qm_reset_bit_clear(qm); 4611 } 4612 EXPORT_SYMBOL_GPL(hisi_qm_reset_done); 4613 4614 static irqreturn_t qm_abnormal_irq(int irq, void *data) 4615 { 4616 struct hisi_qm *qm = data; 4617 enum acc_err_result ret; 4618 4619 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); 4620 ret = qm_process_dev_error(qm); 4621 if (ret == ACC_ERR_NEED_RESET && 4622 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && 4623 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) 4624 schedule_work(&qm->rst_work); 4625 4626 return IRQ_HANDLED; 4627 } 4628 4629 /** 4630 * hisi_qm_dev_shutdown() - Shutdown device. 4631 * @pdev: The device will be shutdown. 4632 * 4633 * This function will stop qm when OS shutdown or rebooting. 
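 *
 * A minimal, hedged sketch of how an accelerator driver built on the QM
 * might hook this up (the foo_* names are hypothetical and only illustrate
 * the call pattern; fields such as .id_table are omitted):
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name     = "foo",
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *		.shutdown = hisi_qm_dev_shutdown,
 *	};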
4634 */ 4635 void hisi_qm_dev_shutdown(struct pci_dev *pdev) 4636 { 4637 struct hisi_qm *qm = pci_get_drvdata(pdev); 4638 int ret; 4639 4640 ret = hisi_qm_stop(qm, QM_NORMAL); 4641 if (ret) 4642 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); 4643 } 4644 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); 4645 4646 static void hisi_qm_controller_reset(struct work_struct *rst_work) 4647 { 4648 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); 4649 int ret; 4650 4651 ret = qm_pm_get_sync(qm); 4652 if (ret) { 4653 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4654 return; 4655 } 4656 4657 /* reset pcie device controller */ 4658 ret = qm_controller_reset(qm); 4659 if (ret) 4660 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); 4661 4662 qm_pm_put_sync(qm); 4663 } 4664 4665 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, 4666 enum qm_stop_reason stop_reason) 4667 { 4668 enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE; 4669 struct pci_dev *pdev = qm->pdev; 4670 int ret; 4671 4672 ret = qm_reset_prepare_ready(qm); 4673 if (ret) { 4674 dev_err(&pdev->dev, "reset prepare not ready!\n"); 4675 atomic_set(&qm->status.flags, QM_STOP); 4676 cmd = QM_VF_PREPARE_FAIL; 4677 goto err_prepare; 4678 } 4679 4680 ret = hisi_qm_stop(qm, stop_reason); 4681 if (ret) { 4682 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); 4683 atomic_set(&qm->status.flags, QM_STOP); 4684 cmd = QM_VF_PREPARE_FAIL; 4685 goto err_prepare; 4686 } else { 4687 goto out; 4688 } 4689 4690 err_prepare: 4691 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4692 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4693 out: 4694 pci_save_state(pdev); 4695 ret = qm_ping_pf(qm, cmd); 4696 if (ret) 4697 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); 4698 } 4699 4700 static void qm_pf_reset_vf_done(struct hisi_qm *qm) 4701 { 4702 enum qm_mb_cmd cmd = QM_VF_START_DONE; 4703 struct pci_dev *pdev = qm->pdev; 4704 int ret; 4705 4706 pci_restore_state(pdev); 4707 ret = hisi_qm_start(qm); 4708 if (ret) { 4709 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); 4710 cmd = QM_VF_START_FAIL; 4711 } 4712 4713 qm_cmd_init(qm); 4714 ret = qm_ping_pf(qm, cmd); 4715 if (ret) 4716 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); 4717 4718 qm_reset_bit_clear(qm); 4719 } 4720 4721 static int qm_wait_pf_reset_finish(struct hisi_qm *qm) 4722 { 4723 struct device *dev = &qm->pdev->dev; 4724 u32 val, cmd; 4725 u64 msg; 4726 int ret; 4727 4728 /* Wait for reset to finish */ 4729 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, 4730 val == BIT(0), QM_VF_RESET_WAIT_US, 4731 QM_VF_RESET_WAIT_TIMEOUT_US); 4732 /* hardware completion status should be available by this time */ 4733 if (ret) { 4734 dev_err(dev, "couldn't get reset done status from PF, timeout!\n"); 4735 return -ETIMEDOUT; 4736 } 4737 4738 /* 4739 * Whether message is got successfully, 4740 * VF needs to ack PF by clearing the interrupt. 
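 * That is why qm_clear_cmd_interrupt() below is called before the return
 * value of qm_get_mb_cmd() is even checked.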
4741 */ 4742 ret = qm_get_mb_cmd(qm, &msg, 0); 4743 qm_clear_cmd_interrupt(qm, 0); 4744 if (ret) { 4745 dev_err(dev, "failed to get msg from PF in reset done!\n"); 4746 return ret; 4747 } 4748 4749 cmd = msg & QM_MB_CMD_DATA_MASK; 4750 if (cmd != QM_PF_RESET_DONE) { 4751 dev_err(dev, "the cmd(%u) is not reset done!\n", cmd); 4752 ret = -EINVAL; 4753 } 4754 4755 return ret; 4756 } 4757 4758 static void qm_pf_reset_vf_process(struct hisi_qm *qm, 4759 enum qm_stop_reason stop_reason) 4760 { 4761 struct device *dev = &qm->pdev->dev; 4762 int ret; 4763 4764 dev_info(dev, "device reset start...\n"); 4765 4766 /* The message is obtained by querying the register during resetting */ 4767 qm_cmd_uninit(qm); 4768 qm_pf_reset_vf_prepare(qm, stop_reason); 4769 4770 ret = qm_wait_pf_reset_finish(qm); 4771 if (ret) 4772 goto err_get_status; 4773 4774 qm_pf_reset_vf_done(qm); 4775 4776 dev_info(dev, "device reset done.\n"); 4777 4778 return; 4779 4780 err_get_status: 4781 qm_cmd_init(qm); 4782 qm_reset_bit_clear(qm); 4783 } 4784 4785 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) 4786 { 4787 struct device *dev = &qm->pdev->dev; 4788 u64 msg; 4789 u32 cmd; 4790 int ret; 4791 4792 /* 4793 * Get the msg from source by sending mailbox. Whether message is got 4794 * successfully, destination needs to ack source by clearing the interrupt. 4795 */ 4796 ret = qm_get_mb_cmd(qm, &msg, fun_num); 4797 qm_clear_cmd_interrupt(qm, BIT(fun_num)); 4798 if (ret) { 4799 dev_err(dev, "failed to get msg from source!\n"); 4800 return; 4801 } 4802 4803 cmd = msg & QM_MB_CMD_DATA_MASK; 4804 switch (cmd) { 4805 case QM_PF_FLR_PREPARE: 4806 qm_pf_reset_vf_process(qm, QM_FLR); 4807 break; 4808 case QM_PF_SRST_PREPARE: 4809 qm_pf_reset_vf_process(qm, QM_SOFT_RESET); 4810 break; 4811 case QM_VF_GET_QOS: 4812 qm_vf_get_qos(qm, fun_num); 4813 break; 4814 case QM_PF_SET_QOS: 4815 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; 4816 break; 4817 default: 4818 dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num); 4819 break; 4820 } 4821 } 4822 4823 static void qm_cmd_process(struct work_struct *cmd_process) 4824 { 4825 struct hisi_qm *qm = container_of(cmd_process, 4826 struct hisi_qm, cmd_process); 4827 u32 vfs_num = qm->vfs_num; 4828 u64 val; 4829 u32 i; 4830 4831 if (qm->fun_type == QM_HW_PF) { 4832 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); 4833 if (!val) 4834 return; 4835 4836 for (i = 1; i <= vfs_num; i++) { 4837 if (val & BIT(i)) 4838 qm_handle_cmd_msg(qm, i); 4839 } 4840 4841 return; 4842 } 4843 4844 qm_handle_cmd_msg(qm, 0); 4845 } 4846 4847 /** 4848 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list. 4849 * @qm: The qm needs add. 4850 * @qm_list: The qm list. 4851 * 4852 * This function adds qm to qm list, and will register algorithm to 4853 * crypto when the qm list is empty. 
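 *
 * A minimal usage sketch, typically called from a driver's probe path
 * (the foo_* names are hypothetical; the list head and lock of @qm_list
 * are assumed to be initialized elsewhere):
 *
 *	static struct hisi_qm_list foo_devices = {
 *		.register_to_crypto	= foo_register_to_crypto,
 *		.unregister_from_crypto	= foo_unregister_from_crypto,
 *	};
 *
 *	ret = hisi_qm_alg_register(qm, &foo_devices);
 *	if (ret)
 *		return ret;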
4854 */ 4855 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 4856 { 4857 struct device *dev = &qm->pdev->dev; 4858 int flag = 0; 4859 int ret = 0; 4860 4861 mutex_lock(&qm_list->lock); 4862 if (list_empty(&qm_list->list)) 4863 flag = 1; 4864 list_add_tail(&qm->list, &qm_list->list); 4865 mutex_unlock(&qm_list->lock); 4866 4867 if (qm->ver <= QM_HW_V2 && qm->use_sva) { 4868 dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n"); 4869 return 0; 4870 } 4871 4872 if (flag) { 4873 ret = qm_list->register_to_crypto(qm); 4874 if (ret) { 4875 mutex_lock(&qm_list->lock); 4876 list_del(&qm->list); 4877 mutex_unlock(&qm_list->lock); 4878 } 4879 } 4880 4881 return ret; 4882 } 4883 EXPORT_SYMBOL_GPL(hisi_qm_alg_register); 4884 4885 /** 4886 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from 4887 * qm list. 4888 * @qm: The qm needs delete. 4889 * @qm_list: The qm list. 4890 * 4891 * This function deletes qm from qm list, and will unregister algorithm 4892 * from crypto when the qm list is empty. 4893 */ 4894 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 4895 { 4896 mutex_lock(&qm_list->lock); 4897 list_del(&qm->list); 4898 mutex_unlock(&qm_list->lock); 4899 4900 if (qm->ver <= QM_HW_V2 && qm->use_sva) 4901 return; 4902 4903 if (list_empty(&qm_list->list)) 4904 qm_list->unregister_from_crypto(qm); 4905 } 4906 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); 4907 4908 static void qm_unregister_abnormal_irq(struct hisi_qm *qm) 4909 { 4910 struct pci_dev *pdev = qm->pdev; 4911 u32 irq_vector, val; 4912 4913 if (qm->fun_type == QM_HW_VF) 4914 return; 4915 4916 val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); 4917 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 4918 return; 4919 4920 irq_vector = val & QM_IRQ_VECTOR_MASK; 4921 free_irq(pci_irq_vector(pdev, irq_vector), qm); 4922 } 4923 4924 static int qm_register_abnormal_irq(struct hisi_qm *qm) 4925 { 4926 struct pci_dev *pdev = qm->pdev; 4927 u32 irq_vector, val; 4928 int ret; 4929 4930 if (qm->fun_type == QM_HW_VF) 4931 return 0; 4932 4933 val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); 4934 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 4935 return 0; 4936 4937 irq_vector = val & QM_IRQ_VECTOR_MASK; 4938 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); 4939 if (ret) 4940 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); 4941 4942 return ret; 4943 } 4944 4945 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) 4946 { 4947 struct pci_dev *pdev = qm->pdev; 4948 u32 irq_vector, val; 4949 4950 val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); 4951 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4952 return; 4953 4954 irq_vector = val & QM_IRQ_VECTOR_MASK; 4955 free_irq(pci_irq_vector(pdev, irq_vector), qm); 4956 } 4957 4958 static int qm_register_mb_cmd_irq(struct hisi_qm *qm) 4959 { 4960 struct pci_dev *pdev = qm->pdev; 4961 u32 irq_vector, val; 4962 int ret; 4963 4964 val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); 4965 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4966 return 0; 4967 4968 irq_vector = val & QM_IRQ_VECTOR_MASK; 4969 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); 4970 if (ret) 4971 dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); 4972 
4973 return ret; 4974 } 4975 4976 static void qm_unregister_aeq_irq(struct hisi_qm *qm) 4977 { 4978 struct pci_dev *pdev = qm->pdev; 4979 u32 irq_vector, val; 4980 4981 val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); 4982 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4983 return; 4984 4985 irq_vector = val & QM_IRQ_VECTOR_MASK; 4986 free_irq(pci_irq_vector(pdev, irq_vector), qm); 4987 } 4988 4989 static int qm_register_aeq_irq(struct hisi_qm *qm) 4990 { 4991 struct pci_dev *pdev = qm->pdev; 4992 u32 irq_vector, val; 4993 int ret; 4994 4995 val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); 4996 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4997 return 0; 4998 4999 irq_vector = val & QM_IRQ_VECTOR_MASK; 5000 ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq, 5001 qm_aeq_thread, 0, qm->dev_name, qm); 5002 if (ret) 5003 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); 5004 5005 return ret; 5006 } 5007 5008 static void qm_unregister_eq_irq(struct hisi_qm *qm) 5009 { 5010 struct pci_dev *pdev = qm->pdev; 5011 u32 irq_vector, val; 5012 5013 val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); 5014 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5015 return; 5016 5017 irq_vector = val & QM_IRQ_VECTOR_MASK; 5018 free_irq(pci_irq_vector(pdev, irq_vector), qm); 5019 } 5020 5021 static int qm_register_eq_irq(struct hisi_qm *qm) 5022 { 5023 struct pci_dev *pdev = qm->pdev; 5024 u32 irq_vector, val; 5025 int ret; 5026 5027 val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); 5028 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5029 return 0; 5030 5031 irq_vector = val & QM_IRQ_VECTOR_MASK; 5032 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); 5033 if (ret) 5034 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); 5035 5036 return ret; 5037 } 5038 5039 static void qm_irqs_unregister(struct hisi_qm *qm) 5040 { 5041 qm_unregister_mb_cmd_irq(qm); 5042 qm_unregister_abnormal_irq(qm); 5043 qm_unregister_aeq_irq(qm); 5044 qm_unregister_eq_irq(qm); 5045 } 5046 5047 static int qm_irqs_register(struct hisi_qm *qm) 5048 { 5049 int ret; 5050 5051 ret = qm_register_eq_irq(qm); 5052 if (ret) 5053 return ret; 5054 5055 ret = qm_register_aeq_irq(qm); 5056 if (ret) 5057 goto free_eq_irq; 5058 5059 ret = qm_register_abnormal_irq(qm); 5060 if (ret) 5061 goto free_aeq_irq; 5062 5063 ret = qm_register_mb_cmd_irq(qm); 5064 if (ret) 5065 goto free_abnormal_irq; 5066 5067 return 0; 5068 5069 free_abnormal_irq: 5070 qm_unregister_abnormal_irq(qm); 5071 free_aeq_irq: 5072 qm_unregister_aeq_irq(qm); 5073 free_eq_irq: 5074 qm_unregister_eq_irq(qm); 5075 return ret; 5076 } 5077 5078 static int qm_get_qp_num(struct hisi_qm *qm) 5079 { 5080 bool is_db_isolation; 5081 5082 /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. 
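 * On QM_HW_V1 there is no such mailbox query, so the VF path below simply
 * returns without touching qp_num.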
*/ 5083 if (qm->fun_type == QM_HW_VF) { 5084 if (qm->ver != QM_HW_V1) 5085 /* v2 starts to support get vft by mailbox */ 5086 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); 5087 5088 return 0; 5089 } 5090 5091 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 5092 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); 5093 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, 5094 QM_FUNC_MAX_QP_CAP, is_db_isolation); 5095 5096 /* check if qp number is valid */ 5097 if (qm->qp_num > qm->max_qp_num) { 5098 dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n", 5099 qm->qp_num, qm->max_qp_num); 5100 return -EINVAL; 5101 } 5102 5103 return 0; 5104 } 5105 5106 static void qm_get_hw_caps(struct hisi_qm *qm) 5107 { 5108 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? 5109 qm_cap_info_pf : qm_cap_info_vf; 5110 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : 5111 ARRAY_SIZE(qm_cap_info_vf); 5112 u32 val, i; 5113 5114 /* Doorbell isolate register is a independent register. */ 5115 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); 5116 if (val) 5117 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 5118 5119 if (qm->ver >= QM_HW_V3) { 5120 val = readl(qm->io_base + QM_FUNC_CAPS_REG); 5121 qm->cap_ver = val & QM_CAPBILITY_VERSION; 5122 } 5123 5124 /* Get PF/VF common capbility */ 5125 for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) { 5126 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); 5127 if (val) 5128 set_bit(qm_cap_info_comm[i].type, &qm->caps); 5129 } 5130 5131 /* Get PF/VF different capbility */ 5132 for (i = 0; i < size; i++) { 5133 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); 5134 if (val) 5135 set_bit(cap_info[i].type, &qm->caps); 5136 } 5137 } 5138 5139 static int qm_get_pci_res(struct hisi_qm *qm) 5140 { 5141 struct pci_dev *pdev = qm->pdev; 5142 struct device *dev = &pdev->dev; 5143 int ret; 5144 5145 ret = pci_request_mem_regions(pdev, qm->dev_name); 5146 if (ret < 0) { 5147 dev_err(dev, "Failed to request mem regions!\n"); 5148 return ret; 5149 } 5150 5151 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); 5152 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); 5153 if (!qm->io_base) { 5154 ret = -EIO; 5155 goto err_request_mem_regions; 5156 } 5157 5158 qm_get_hw_caps(qm); 5159 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 5160 qm->db_interval = QM_QP_DB_INTERVAL; 5161 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); 5162 qm->db_io_base = ioremap(qm->db_phys_base, 5163 pci_resource_len(pdev, PCI_BAR_4)); 5164 if (!qm->db_io_base) { 5165 ret = -EIO; 5166 goto err_ioremap; 5167 } 5168 } else { 5169 qm->db_phys_base = qm->phys_base; 5170 qm->db_io_base = qm->io_base; 5171 qm->db_interval = 0; 5172 } 5173 5174 ret = qm_get_qp_num(qm); 5175 if (ret) 5176 goto err_db_ioremap; 5177 5178 return 0; 5179 5180 err_db_ioremap: 5181 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 5182 iounmap(qm->db_io_base); 5183 err_ioremap: 5184 iounmap(qm->io_base); 5185 err_request_mem_regions: 5186 pci_release_mem_regions(pdev); 5187 return ret; 5188 } 5189 5190 static int hisi_qm_pci_init(struct hisi_qm *qm) 5191 { 5192 struct pci_dev *pdev = qm->pdev; 5193 struct device *dev = &pdev->dev; 5194 unsigned int num_vec; 5195 int ret; 5196 5197 ret = pci_enable_device_mem(pdev); 5198 if (ret < 0) { 5199 dev_err(dev, "Failed to enable device mem!\n"); 5200 return ret; 5201 } 5202 5203 ret = qm_get_pci_res(qm); 5204 
if (ret) 5205 goto err_disable_pcidev; 5206 5207 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 5208 if (ret < 0) 5209 goto err_get_pci_res; 5210 pci_set_master(pdev); 5211 5212 num_vec = qm_get_irq_num(qm); 5213 ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); 5214 if (ret < 0) { 5215 dev_err(dev, "Failed to enable MSI vectors!\n"); 5216 goto err_get_pci_res; 5217 } 5218 5219 return 0; 5220 5221 err_get_pci_res: 5222 qm_put_pci_res(qm); 5223 err_disable_pcidev: 5224 pci_disable_device(pdev); 5225 return ret; 5226 } 5227 5228 static int hisi_qm_init_work(struct hisi_qm *qm) 5229 { 5230 int i; 5231 5232 for (i = 0; i < qm->qp_num; i++) 5233 INIT_WORK(&qm->poll_data[i].work, qm_work_process); 5234 5235 if (qm->fun_type == QM_HW_PF) 5236 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); 5237 5238 if (qm->ver > QM_HW_V2) 5239 INIT_WORK(&qm->cmd_process, qm_cmd_process); 5240 5241 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | 5242 WQ_UNBOUND, num_online_cpus(), 5243 pci_name(qm->pdev)); 5244 if (!qm->wq) { 5245 pci_err(qm->pdev, "failed to alloc workqueue!\n"); 5246 return -ENOMEM; 5247 } 5248 5249 return 0; 5250 } 5251 5252 static int hisi_qp_alloc_memory(struct hisi_qm *qm) 5253 { 5254 struct device *dev = &qm->pdev->dev; 5255 u16 sq_depth, cq_depth; 5256 size_t qp_dma_size; 5257 int i, ret; 5258 5259 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); 5260 if (!qm->qp_array) 5261 return -ENOMEM; 5262 5263 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); 5264 if (!qm->poll_data) { 5265 kfree(qm->qp_array); 5266 return -ENOMEM; 5267 } 5268 5269 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 5270 5271 /* one more page for device or qp statuses */ 5272 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; 5273 qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; 5274 for (i = 0; i < qm->qp_num; i++) { 5275 qm->poll_data[i].qm = qm; 5276 ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); 5277 if (ret) 5278 goto err_init_qp_mem; 5279 5280 dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); 5281 } 5282 5283 return 0; 5284 err_init_qp_mem: 5285 hisi_qp_memory_uninit(qm, i); 5286 5287 return ret; 5288 } 5289 5290 static int hisi_qm_memory_init(struct hisi_qm *qm) 5291 { 5292 struct device *dev = &qm->pdev->dev; 5293 int ret, total_func; 5294 size_t off = 0; 5295 5296 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 5297 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; 5298 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); 5299 if (!qm->factor) 5300 return -ENOMEM; 5301 5302 /* Only the PF value needs to be initialized */ 5303 qm->factor[0].func_qos = QM_QOS_MAX_VAL; 5304 } 5305 5306 #define QM_INIT_BUF(qm, type, num) do { \ 5307 (qm)->type = ((qm)->qdma.va + (off)); \ 5308 (qm)->type##_dma = (qm)->qdma.dma + (off); \ 5309 off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ 5310 } while (0) 5311 5312 idr_init(&qm->qp_idr); 5313 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); 5314 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + 5315 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + 5316 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + 5317 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); 5318 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, 5319 GFP_ATOMIC); 5320 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); 5321 if (!qm->qdma.va) { 5322 ret = 
-ENOMEM;
		goto err_destroy_idr;
	}

	QM_INIT_BUF(qm, eqe, qm->eq_depth);
	QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_alloc_qp_array;

	return 0;

err_alloc_qp_array:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_destroy_idr:
	idr_destroy(&qm->qp_idr);
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		kfree(qm->factor);

	return ret;
}

/**
 * hisi_qm_init() - Initialize the configuration of a qm.
 * @qm: The qm to be initialized.
 *
 * This function initializes the qm; hisi_qm_start() can then be called to
 * put the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irqs_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_PF) {
		qm_disable_clock_gate(qm);
		ret = qm_dev_mem_reset(qm);
		if (ret) {
			dev_err(dev, "failed to reset device memory\n");
			goto err_irq_register;
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
		ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "failed to alloc uacce (%d)\n", ret);
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	ret = hisi_qm_init_work(qm);
	if (ret)
		goto err_free_qm_memory;

	qm_cmd_init(qm);
	atomic_set(&qm->status.flags, QM_INIT);

	return 0;

err_free_qm_memory:
	hisi_qm_memory_uninit(qm);
err_alloc_uacce:
	qm_remove_uacce(qm);
err_irq_register:
	qm_irqs_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);

/**
 * hisi_qm_get_dfx_access() - Try to get dfx access.
 * @qm: pointer to accelerator device.
 *
 * Try to get dfx access, so the user can read the debug information.
 *
 * If the device is suspended, return failure; otherwise
 * bump up the runtime PM usage counter.
 */
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (pm_runtime_suspended(dev)) {
		dev_info(dev, "cannot read/write - device is suspended.\n");
		return -EAGAIN;
	}

	return qm_pm_get_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);

/**
 * hisi_qm_put_dfx_access() - Put dfx access.
 * @qm: pointer to accelerator device.
 *
 * Put dfx access and drop the runtime PM usage counter.
 */
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
{
	qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);

/**
 * hisi_qm_pm_init() - Initialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Initialize runtime PM for the qm.
 */
void hisi_qm_pm_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);

/**
 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Uninitialize runtime PM for the qm.
 */
void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_get_noresume(dev);
	pm_runtime_dont_use_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);

static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "failed to disable MSI before suspending!\n");
		return ret;
	}

	/* shutdown OOO register */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");

	return ret;
}

static int qm_rebuild_for_resume(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
		return ret;
	}

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSI after resuming!\n");
		return ret;
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "failed to init device after resuming\n");
		return ret;
	}

	qm_cmd_init(qm);
	hisi_qm_dev_err_init(qm);
	qm_disable_clock_gate(qm);
	ret = qm_dev_mem_reset(qm);
	if (ret)
		pci_err(pdev, "failed to reset device memory\n");

	return ret;
}

/**
 * hisi_qm_suspend() - Runtime suspend of given device.
 * @dev: device to suspend.
 *
 * Stop the qm and prepare the hardware for runtime suspend.
 */
int hisi_qm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "entering suspended state\n");

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret) {
		pci_err(pdev, "failed to stop qm(%d)\n", ret);
		return ret;
	}

	ret = qm_prepare_for_suspend(qm);
	if (ret)
		pci_err(pdev, "failed to prepare for suspend (%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_suspend);

/**
 * hisi_qm_resume() - Runtime resume of given device.
 * @dev: device to resume.
 *
 * Rebuild the hardware state and restart the qm after runtime resume.
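 *
 * Drivers normally do not call this directly; together with hisi_qm_suspend()
 * it is wired into the device's dev_pm_ops (a hedged sketch, with foo_pm_ops
 * as a hypothetical name):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};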
 */
int hisi_qm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "resuming from suspend state\n");

	ret = qm_rebuild_for_resume(qm);
	if (ret) {
		pci_err(pdev, "failed to rebuild for resume (%d)\n", ret);
		return ret;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		if (qm_check_dev_error(qm)) {
			pci_info(pdev, "failed to start qm due to device error, device will be reset!\n");
			return 0;
		}

		pci_err(pdev, "failed to start qm(%d)!\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");