// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>
#include "qm_common.h"

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE	0x0
#define QM_VF_AEQ_INT_MASK	0x4
#define QM_VF_EQ_INT_SOURCE	0x8
#define QM_VF_EQ_INT_MASK	0xc

#define QM_IRQ_VECTOR_MASK	GENMASK(15, 0)
#define QM_IRQ_TYPE_MASK	GENMASK(15, 0)
#define QM_IRQ_TYPE_SHIFT	16
#define QM_ABN_IRQ_TYPE_MASK	GENMASK(7, 0)

/* mailbox */
#define QM_MB_PING_ALL_VFS	0xffff
#define QM_MB_CMD_DATA_SHIFT	32
#define QM_MB_CMD_DATA_MASK	GENMASK(31, 0)
#define QM_MB_STATUS_MASK	GENMASK(12, 9)

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT	0
#define QM_SQ_PAGE_SIZE_SHIFT	4
#define QM_SQ_BUF_SIZE_SHIFT	8
#define QM_SQ_SQE_SIZE_SHIFT	12
#define QM_SQ_PRIORITY_SHIFT	0
#define QM_SQ_ORDERS_SHIFT	4
#define QM_SQ_TYPE_SHIFT	8
#define QM_QC_PASID_ENABLE	0x1
#define QM_QC_PASID_ENABLE_SHIFT	7

#define QM_SQ_TYPE_MASK		GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)	((le16_to_cpu((sqc)->w11) >> 6) & 0x1)

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT	0
#define QM_CQ_PAGE_SIZE_SHIFT	4
#define QM_CQ_BUF_SIZE_SHIFT	8
#define QM_CQ_CQE_SIZE_SHIFT	12
#define QM_CQ_PHASE_SHIFT	0
#define QM_CQ_FLAG_SHIFT	1

#define QM_CQE_PHASE(cqe)	(le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE		4
#define QM_CQ_TAIL_IDX(cqc)	((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE	(2UL << 12)
#define QM_EQC_PHASE_SHIFT	16

#define QM_EQE_PHASE(eqe)	((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK		GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe)	((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT	17
#define QM_AEQE_CQN_MASK	GENMASK(15, 0)
#define QM_CQ_OVERFLOW		0
#define QM_EQ_OVERFLOW		1
#define QM_CQE_ERROR		2

#define QM_XQ_DEPTH_SHIFT	16
#define QM_XQ_DEPTH_MASK	GENMASK(15, 0)

#define QM_DOORBELL_CMD_SQ	0
#define QM_DOORBELL_CMD_CQ	1
#define QM_DOORBELL_CMD_EQ	2
#define QM_DOORBELL_CMD_AEQ	3

#define QM_DOORBELL_BASE_V1	0x340
#define QM_DB_CMD_SHIFT_V1	16
#define QM_DB_INDEX_SHIFT_V1	32
#define QM_DB_PRIORITY_SHIFT_V1	48
#define QM_PAGE_SIZE		0x0034
#define QM_QP_DB_INTERVAL	0x10000

#define QM_MEM_START_INIT	0x100040
#define QM_MEM_INIT_DONE	0x100044
#define QM_VFT_CFG_RDY		0x10006c
#define QM_VFT_CFG_OP_WR	0x100058
#define QM_VFT_CFG_TYPE		0x10005c
#define QM_SQC_VFT		0x0
#define QM_CQC_VFT		0x1
#define QM_VFT_CFG		0x100060
#define QM_VFT_CFG_OP_ENABLE	0x100054
#define QM_PM_CTRL		0x100148
#define QM_IDLE_DISABLE		BIT(9)

#define QM_VFT_CFG_DATA_L	0x100064
#define QM_VFT_CFG_DATA_H	0x100068
#define QM_SQC_VFT_BUF_SIZE	(7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE	(5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER	(1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT	28
#define QM_SQC_VFT_VALID	(1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT	45
#define QM_CQC_VFT_BUF_SIZE	(7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE	(5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER	(1ULL << 16)
#define QM_CQC_VFT_VALID	(1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2	28
#define QM_SQC_VFT_BASE_MASK_V2	GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2	45
#define QM_SQC_VFT_NUM_MASK_v2	GENMASK(9, 0)

#define QM_ABNORMAL_INT_SOURCE	0x100000
#define QM_ABNORMAL_INT_MASK	0x100004
#define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
#define QM_ABNORMAL_INT_STATUS	0x100008
#define QM_ABNORMAL_INT_SET	0x10000c
#define QM_ABNORMAL_INF00	0x100010
#define QM_FIFO_OVERFLOW_TYPE	0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
#define QM_FIFO_OVERFLOW_VF	0x3f
#define QM_ABNORMAL_INF01	0x100014
#define QM_DB_TIMEOUT_TYPE	0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT	6
#define QM_DB_TIMEOUT_VF	0x3f
#define QM_RAS_CE_ENABLE	0x1000ec
#define QM_RAS_FE_ENABLE	0x1000f0
#define QM_RAS_NFE_ENABLE	0x1000f4
#define QM_RAS_CE_THRESHOLD	0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ	1
#define QM_OOO_SHUTDOWN_SEL	0x1040f8
#define QM_ECC_MBIT		BIT(2)
#define QM_DB_TIMEOUT		BIT(10)
#define QM_OF_FIFO_OF		BIT(11)

#define QM_RESET_WAIT_TIMEOUT	400
#define QM_PEH_VENDOR_ID	0x1000d8
#define ACC_VENDOR_ID_VALUE	0x5a5a
#define QM_PEH_DFX_INFO0	0x1000fc
#define QM_PEH_DFX_INFO1	0x100100
#define QM_PEH_DFX_MASK		(BIT(0) | BIT(2))
#define QM_PEH_MSI_FINISH_MASK	GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
#define ACC_PEH_MSI_DISABLE	GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
#define ACC_MASTER_TRANS_RETURN_RW	3
#define ACC_MASTER_TRANS_RETURN	0x300150
#define ACC_MASTER_GLOBAL_CTRL	0x300000
#define ACC_AM_CFG_PORT_WR_EN	0x30001c
#define QM_RAS_NFE_MBIT_DISABLE	~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS	0x300104
#define ACC_ROB_ECC_ERR_MULTPL	BIT(1)
#define QM_MSI_CAP_ENABLE	BIT(16)

/* interfunction communication */
#define QM_IFC_READY_STATUS	0x100128
#define QM_IFC_C_STS_M		0x10012C
#define QM_IFC_INT_SET_P	0x100130
#define QM_IFC_INT_CFG		0x100134
#define QM_IFC_INT_SOURCE_P	0x100138
#define QM_IFC_INT_SOURCE_V	0x0020
#define QM_IFC_INT_MASK		0x0024
#define QM_IFC_INT_STATUS	0x0028
#define QM_IFC_INT_SET_V	0x002C
#define QM_IFC_SEND_ALL_VFS	GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR	GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK	BIT(0)
#define QM_IFC_INT_DISABLE	BIT(0)
#define QM_IFC_INT_STATUS_MASK	BIT(0)
#define QM_IFC_INT_SET_MASK	BIT(0)
#define QM_WAIT_DST_ACK		10
#define QM_MAX_PF_WAIT_COUNT	10
#define QM_MAX_VF_WAIT_COUNT	40
#define QM_VF_RESET_WAIT_US	20000
#define QM_VF_RESET_WAIT_CNT	3000
#define QM_VF_RESET_WAIT_TIMEOUT_US	\
	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)

#define POLL_PERIOD		10
#define POLL_TIMEOUT		1000
#define WAIT_PERIOD_US_MAX	200
#define WAIT_PERIOD_US_MIN	100
#define MAX_WAIT_COUNTS		1000
#define QM_CACHE_WB_START	0x204
#define QM_CACHE_WB_DONE	0x208
#define QM_FUNC_CAPS_REG	0x3100
#define QM_CAPBILITY_VERSION	GENMASK(7, 0)

#define PCI_BAR_2		2
#define PCI_BAR_4		4
#define QM_SQE_DATA_ALIGN_MASK	GENMASK(6, 0)
#define QMC_ALIGN(sz)		ALIGN(sz, 32)

#define QM_DBG_READ_LEN		256
#define QM_PCI_COMMAND_INVALID	~0
#define QM_RESET_STOP_TX_OFFSET	1
#define QM_RESET_STOP_RX_OFFSET	2

#define WAIT_PERIOD		20
#define REMOVE_WAIT_DELAY	10

#define QM_DRIVER_REMOVING	0
#define QM_RST_SCHED		1
#define QM_QOS_PARAM_NUM	2
#define QM_QOS_VAL_NUM		1
#define QM_QOS_BDF_PARAM_NUM	4
#define QM_QOS_MAX_VAL		1000
#define QM_QOS_RATE		100
#define QM_QOS_EXPAND_RATE	1000
#define QM_SHAPER_CIR_B_MASK	GENMASK(7, 0)
#define QM_SHAPER_CIR_U_MASK	GENMASK(10, 8)
#define QM_SHAPER_CIR_S_MASK	GENMASK(14, 11)
#define QM_SHAPER_FACTOR_CIR_U_SHIFT	8
#define QM_SHAPER_FACTOR_CIR_S_SHIFT	11
#define QM_SHAPER_FACTOR_CBS_B_SHIFT	15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT	19
#define QM_SHAPER_CBS_B		1
#define QM_SHAPER_CBS_S		16
#define QM_SHAPER_VFT_OFFSET	6
#define WAIT_FOR_QOS_VF		100
#define QM_QOS_MIN_ERROR_RATE	5
#define QM_QOS_TYPICAL_NUM	8
#define QM_SHAPER_MIN_CBS_S	8
#define QM_QOS_TICK		0x300U
#define QM_QOS_DIVISOR_CLK	0x1f40U
#define QM_QOS_MAX_CIR_B	200
#define QM_QOS_MIN_CIR_B	100
#define QM_QOS_MAX_CIR_U	6
#define QM_QOS_MAX_CIR_S	11
#define QM_AUTOSUSPEND_DELAY	3000

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT)	| \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)	| \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)	| \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
	((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT)	| \
	((orders) << QM_SQ_ORDERS_SHIFT)	| \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT)	| \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT)	| \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)	| \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define INIT_QC_COMMON(qc, base, pasid) do {			\
	(qc)->head = 0;						\
	(qc)->tail = 0;						\
	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
	(qc)->dw3 = 0;						\
	(qc)->w8 = 0;						\
	(qc)->rsvd0 = 0;					\
	(qc)->pasid = cpu_to_le16(pasid);			\
	(qc)->w11 = 0;						\
	(qc)->rsvd1 = 0;					\
} while (0)

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
	SHAPER_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

enum qm_alg_type {
	ALG_TYPE_0,
	ALG_TYPE_1,
};

enum qm_mb_cmd {
	QM_PF_FLR_PREPARE = 0x01,
	QM_PF_SRST_PREPARE,
	QM_PF_RESET_DONE,
	QM_VF_PREPARE_DONE,
	QM_VF_PREPARE_FAIL,
	QM_VF_START_DONE,
	QM_VF_START_FAIL,
	QM_PF_SET_QOS,
	QM_VF_GET_QOS,
};

enum qm_basic_type {
	QM_TOTAL_QP_NUM_CAP = 0x0,
	QM_FUNC_MAX_QP_CAP,
	QM_XEQ_DEPTH_CAP,
	QM_QP_DEPTH_CAP,
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
	QM_PF_IRQ_NUM_CAP,
	QM_VF_IRQ_NUM_CAP,
};

static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
	{QM_SUPPORT_DB_ISOLATION, 0x30,   0, BIT(0),  0x0, 0x0, 0x0},
	{QM_SUPPORT_FUNC_QOS,     0x3100, 0, BIT(8),  0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_QP,      0x3100, 0, BIT(9),  0x0, 0x0, 0x1},
	{QM_SUPPORT_MB_COMMAND,   0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
};

static const struct hisi_qm_cap_info qm_basic_info[] = {
	{QM_TOTAL_QP_NUM_CAP,   0x100158, 0,  GENMASK(10, 0), 0x1000,    0x400,     0x400},
	{QM_FUNC_MAX_QP_CAP,    0x100158, 11, GENMASK(10, 0), 0x1000,    0x400,     0x400},
	{QM_XEQ_DEPTH_CAP,      0x3104,   0,  GENMASK(31, 0), 0x800,     0x4000800, 0x4000800},
	{QM_QP_DEPTH_CAP,       0x3108,   0,  GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
	{QM_EQ_IRQ_TYPE_CAP,    0x310c,   0,  GENMASK(31, 0), 0x10000,   0x10000,   0x10000},
	{QM_AEQ_IRQ_TYPE_CAP,   0x3110,   0,  GENMASK(31, 0), 0x0,       0x10001,   0x10001},
	{QM_ABN_IRQ_TYPE_CAP,   0x3114,   0,  GENMASK(31, 0), 0x0,       0x10003,   0x10003},
	{QM_PF2VF_IRQ_TYPE_CAP, 0x3118,   0,  GENMASK(31, 0), 0x0,       0x0,       0x10002},
	{QM_PF_IRQ_NUM_CAP,     0x311c,   16, GENMASK(15, 0), 0x1,       0x4,       0x4},
	{QM_VF_IRQ_NUM_CAP,     0x311c,   0,  GENMASK(15, 0), 0x1,       0x2,       0x3},
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
	int (*set_msi)(struct hisi_qm *qm, bool set);
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
	{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
	{ /* sentinel */ }
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

static const char * const qp_s[] = {
	"none", "init", "start", "stop", "close",
};

struct qm_typical_qos_table {
	u32 start;
	u32 end;
	u32 val;
};

/* the qos step is 100 */
static struct qm_typical_qos_table shaper_cir_s[] = {
	{100, 100, 4},
	{200, 200, 3},
	{300, 500, 2},
	{600, 1000, 1},
	{1100, 100000, 0},
};

static struct qm_typical_qos_table shaper_cbs_s[] = {
	{100, 200, 9},
	{300, 500, 11},
	{600, 1000, 12},
	{1100, 10000, 16},
	{10100, 25000, 17},
	{25100, 50000, 18},
	{50100, 100000, 19}
};

static void qm_irqs_unregister(struct hisi_qm *qm);

static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
	enum qm_state curr = atomic_read(&qm->status.flags);
	bool avail = false;

	switch (curr) {
	case QM_INIT:
		if (new == QM_START || new == QM_CLOSE)
			avail = true;
		break;
	case QM_START:
		if (new == QM_STOP)
			avail = true;
		break;
	case QM_STOP:
		if (new == QM_CLOSE || new == QM_START)
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
		qm_s[curr], qm_s[new]);

	if (!avail)
		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
			 qm_s[curr], qm_s[new]);

	return avail;
}

static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
			      enum qp_state new)
{
	enum qm_state qm_curr = atomic_read(&qm->status.flags);
	enum qp_state qp_curr = 0;
	bool avail = false;

	if (qp)
		qp_curr = atomic_read(&qp->qp_status.flags);

	switch (new) {
	case QP_INIT:
		if (qm_curr == QM_START || qm_curr == QM_INIT)
			avail = true;
		break;
	case QP_START:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP))
			avail = true;
		break;
	case QP_STOP:
		if ((qm_curr == QM_START && qp_curr == QP_START) ||
		    (qp_curr == QP_INIT))
			avail = true;
		break;
	case QP_CLOSE:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	if (!avail)
		dev_warn(&qm->pdev->dev,
			 "Can not change qp state from %s to %s in QM %s\n",
			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	return avail;
}

static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static u32 qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
	u32 val, dev_val;

	if (qm->fun_type == QM_HW_VF)
		return false;

	val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
	dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;

	return val || dev_val;
}

static int qm_wait_reset_finish(struct hisi_qm *qm)
{
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * PF and VF on host does not support resetting at the
	 * same time on Kunpeng920.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}

static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		(0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), POLL_PERIOD, POLL_TIMEOUT);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);

/* 128 bit should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
	unsigned long tmp0 = 0, tmp1 = 0;

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		dma_wmb();
		return;
	}

	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dmb oshst\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
}

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	int ret;
	u32 val;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
		ret = -EBUSY;
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		ret = -ETIMEDOUT;
		goto mb_busy;
	}

	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
	if (val & QM_MB_STATUS_MASK) {
		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
		ret = -EIO;
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}

int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
		queue, cmd, (unsigned long long)dma_addr);

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static void qm_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_PM_CTRL);
	val |= QM_IDLE_DISABLE;
	writel(val, qm->io_base + QM_PM_CTRL);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

/**
 * hisi_qm_get_hw_info() - Get device information.
 * @qm: The qm from which to get the information.
 * @info_table: Array for storing device information.
 * @index: Index in info_table.
 * @is_read: Whether to read the value from the register; 0 means reading
 *	     from the register is not supported.
 *
 * This function returns device information the caller needs.
 */
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read)
{
	u32 val;

	switch (qm->ver) {
	case QM_HW_V1:
		return info_table[index].v1_val;
	case QM_HW_V2:
		return info_table[index].v2_val;
	default:
		if (!is_read)
			return info_table[index].v3_val;

		val = readl(qm->io_base + info_table[index].offset);
		return (val >> info_table[index].shift) & info_table[index].mask;
	}
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);

static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
			     u16 *high_bits, enum qm_basic_type type)
{
	u32 depth;

	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
	*low_bits = depth & QM_XQ_DEPTH_MASK;
	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}

static u32 qm_get_irq_num(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);

	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}

static int qm_pm_get_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get_sync(%d).\n", ret);
		return ret;
	}

	return 0;
}

static void qm_pm_put_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

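/*
 * Completion-queue wrap-around tracking (comment added for clarity):
 * each CQE carries a phase bit (QM_CQE_PHASE()) that the hardware toggles
 * every time it wraps past the end of the ring. Software keeps its own
 * expected phase in qp_status.cqc_phase and flips it in qm_cq_head_update()
 * below when cq_head wraps, so a CQE is treated as valid only while the
 * two phases match (see the loop in qm_poll_req_cb()).
 */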
static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == qp->cq_depth - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
		qp->qp_status.cq_head++;
	}
}

static void qm_poll_req_cb(struct hisi_qp *qp)
{
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	struct hisi_qm *qm = qp->qm;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		dma_rmb();
		qp->req_cb(qp, qp->sqe + qm->sqe_size *
			   le16_to_cpu(cqe->sq_head));
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 0);
		atomic_dec(&qp->qp_status.used);
	}

	/* set c_flag */
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}

static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
	struct hisi_qm *qm = poll_data->qm;
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	u16 eq_depth = qm->eq_depth;
	int eqe_num = 0;
	u16 cqn;

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data->qp_finish_id[eqe_num] = cqn;
		eqe_num++;

		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == (eq_depth >> 1) - 1)
			break;
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return eqe_num;
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm_poll_data *poll_data =
		container_of(work, struct hisi_qm_poll_data, work);
	struct hisi_qm *qm = poll_data->qm;
	struct hisi_qp *qp;
	int eqe_num, i;

	/* Get qp id of completed tasks and re-enable the interrupt. */
	eqe_num = qm_get_complete_eqe_num(poll_data);
	for (i = eqe_num - 1; i >= 0; i--) {
		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
			continue;

		if (qp->event_cb) {
			qp->event_cb(qp);
			continue;
		}

		if (likely(qp->req_cb))
			qm_poll_req_cb(qp);
	}
}

static bool do_qm_irq(struct hisi_qm *qm)
{
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qm_poll_data *poll_data;
	u16 cqn;

	if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return false;

	if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data = &qm->poll_data[cqn];
		queue_work(qm->wq, &poll_data->work);

		return true;
	}

	return false;
}

static irqreturn_t qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	bool ret;

	ret = do_qm_irq(qm);
	if (ret)
		return IRQ_HANDLED;

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_STATUS);
	val &= QM_IFC_INT_STATUS_MASK;
	if (!val)
		return IRQ_NONE;

	schedule_work(&qm->cmd_process);

	return IRQ_HANDLED;
}

static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
{
	u32 *addr;

	if (qp->is_in_kernel)
		return;

	addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
	*addr = 1;

	/* make sure setup is completed */
	smp_wmb();
}

static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
{
	struct hisi_qp *qp = &qm->qp_array[qp_id];

	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_stop_qp(qp);
	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
}

static void qm_reset_function(struct hisi_qm *qm)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (qm_check_dev_error(pf_qm))
		return;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(dev, "reset function not ready\n");
		return;
	}

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		dev_err(dev, "failed to stop qm when reset function\n");
		goto clear_bit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		dev_err(dev, "failed to start qm when reset function\n");

clear_bit:
	qm_reset_bit_clear(qm);
}

static irqreturn_t qm_aeq_thread(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;

		switch (type) {
		case QM_EQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
			qm_reset_function(qm);
			return IRQ_HANDLED;
		case QM_CQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
				qp_id);
			fallthrough;
		case QM_CQE_ERROR:
			qm_disable_qp(qm, qp_id);
			break;
		default:
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);
			break;
		}

		if (qm->status.aeq_head == aeq_depth - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);

	return IRQ_HANDLED;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size is not supported: %lu, default set to 4KB",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}

/*
 * acc_shaper_para_calc() Get the IR value by the qos formula, the return value
 * is the expected qos calculated.
 * the formula:
 * IR = X Mbps if ir = 1 means IR = 100 Mbps, if ir = 10000 means = 10Gbps
 *
 *		IR_b * (2 ^ IR_u) * 8000
 * IR(Mbps) = -------------------------
 *		  Tick * (2 ^ IR_s)
 */
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
		(QM_QOS_TICK * (1 << cir_s));
}
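/*
 * Worked example (added for illustration only, derived from the constants
 * above): with Tick = QM_QOS_TICK = 0x300 (768) and the divisor
 * QM_QOS_DIVISOR_CLK = 0x1f40 (8000), acc_shaper_para_calc(100, 0, 4)
 * evaluates to (100 * 8000 * 1) / (768 * 16) = 65 in integer arithmetic.
 * qm_get_shaper_para() below simply sweeps cir_b and cir_u until such a
 * result lands within 0.5% (QM_QOS_MIN_ERROR_RATE out of
 * QM_QOS_EXPAND_RATE) of the requested rate.
 */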

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cbs_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
			return shaper_cbs_s[i].val;
	}

	return QM_SHAPER_MIN_CBS_S;
}

static u32 acc_shaper_calc_cir_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cir_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
			return shaper_cir_s[i].val;
	}

	return 0;
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);
	cir_s = acc_shaper_calc_cir_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);

			error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
			if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
				factor->cir_b = cir_b;
				factor->cir_u = cir_u;
				factor->cir_s = cir_s;
				return 0;
			}
		}
	}

	return -EINVAL;
}

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number, struct qm_shaper_factor *factor)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE |
				      QM_SQC_VFT_SQC_SIZE |
				      QM_SQC_VFT_INDEX_NUMBER |
				      QM_SQC_VFT_VALID |
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE |
				      QM_CQC_VFT_SQC_SIZE |
				      QM_CQC_VFT_INDEX_NUMBER |
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		case SHAPER_VFT:
			if (factor) {
				tmp = factor->cir_b |
				      (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
				      (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
				      (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
				      (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	struct qm_shaper_factor *factor = NULL;
	unsigned int val;
	int ret;

	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		factor = &qm->factor[fun_num];

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
	if (type == SHAPER_VFT)
		fun_num |= base << QM_SHAPER_VFT_OFFSET;

	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number, factor);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
	u32 qos = qm->factor[fun_num].func_qos;
	int ret, i;

	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
		return ret;
	}
	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The base number of queue reuse for different alg type */
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

	/* init default shaper qos val */
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		ret = qm_shaper_init_vft(qm, fun_num);
		if (ret)
			goto back_sqc_cqc;
	}

	return 0;
back_sqc_cqc:
	for (i = SQC_VFT; i <= CQC_VFT; i++)
		qm_set_vft_common(qm, i, fun_num, 0, 0);

	return ret;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_v2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}

void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
			dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;
	void *ctx_addr;

	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx_addr)
		return ERR_PTR(-ENOMEM);

	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		dev_err(dev, "DMA mapping error!\n");
		kfree(ctx_addr);
		return ERR_PTR(-ENOMEM);
	}

	return ctx_addr;
}

void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
		      const void *ctx_addr, dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;

	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
	kfree(ctx_addr);
}

static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
}

static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}

static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_cfg(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
	/* clear QM hw residual error source */
	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* configure error type */
	writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
	writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}

static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	/* enable close master ooo when hardware error happened */
	writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);

	/* disable close master ooo when hardware error happened */
	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
	const struct hisi_qm_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 reg_val, type, vf_num;
	int i;

	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
		err = &qm_hw_error[i];
		if (!(err->int_msk & error_status))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & QM_DB_TIMEOUT) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
			       QM_DB_TIMEOUT_TYPE_SHIFT;
			vf_num = reg_val & QM_DB_TIMEOUT_VF;
			dev_err(dev, "qm %s doorbell timeout in function %u\n",
				qm_db_timeout[type], vf_num);
		} else if (err->int_msk & QM_OF_FIFO_OF) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;

			if (type < ARRAY_SIZE(qm_fifo_overflow))
				dev_err(dev, "qm %s fifo overflow in function %u\n",
					qm_fifo_overflow[type], vf_num);
			else
				dev_err(dev, "unknown error type\n");
		}
	}
}

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
	u32 error_status, tmp;

	/* read err sts */
	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
	error_status = qm->error_mask & tmp;

	if (error_status) {
		if (error_status & QM_ECC_MBIT)
			qm->err_status.is_qm_ecc_mbit = true;

		qm_log_hw_error(qm, error_status);
		if (error_status & qm->err_info.qm_reset_mask)
			return ACC_ERR_NEED_RESET;

		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	}

	return ACC_ERR_RECOVERED;
}

static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret)
		goto err_unlock;

	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
	       ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);

	val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
	val |= QM_IFC_INT_SOURCE_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
}

static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
	struct device *dev = &qm->pdev->dev;
	u32 cmd;
	u64 msg;
	int ret;

	ret = qm_get_mb_cmd(qm, &msg, vf_id);
	if (ret) {
		dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_VF_PREPARE_FAIL:
		dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
		break;
	case QM_VF_START_FAIL:
		dev_err(dev, "failed to start VF(%u)!\n", vf_id);
		break;
	case QM_VF_PREPARE_DONE:
	case QM_VF_START_DONE:
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
		break;
	}
}

static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	int cnt = 0;
	int ret = 0;
	u64 val;
	u32 i;

	if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
		return 0;

	while (true) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		/* All VFs send command to PF, break */
		if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
			break;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			ret = -EBUSY;
			break;
		}

		msleep(QM_WAIT_DST_ACK);
	}

	/* PF check VFs msg */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			qm_handle_vf_msg(qm, i);
		else
			dev_err(dev, "VF(%u) not ping PF!\n", i);
	}

	/* PF clear interrupt to ack VFs */
	qm_clear_cmd_interrupt(qm, val);

	return ret;
}

static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_CFG);
	val &= ~QM_IFC_SEND_ALL_VFS;
	val |= fun_num;
	writel(val, qm->io_base + QM_IFC_INT_CFG);

	val = readl(qm->io_base + QM_IFC_INT_SET_P);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_P);
}

static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_SET_V);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_V);
}

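/*
 * Summary comment added for clarity: qm_ping_single_vf() and
 * qm_ping_all_vfs() below implement the PF -> VF notification flow.
 * The PF first places the command in the mailbox (QM_MB_CMD_SRC), then
 * raises the inter-function interrupt through QM_IFC_INT_SET_P via
 * qm_trigger_vf_interrupt(), and finally polls QM_IFC_READY_STATUS until
 * the target VF's bit is cleared, which means the VF has taken the command.
 */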
static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_mailbox mailbox;
	int cnt = 0;
	u64 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
		goto err_unlock;
	}

	qm_trigger_vf_interrupt(qm, fun_num);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* if VF respond, PF notifies VF successfully. */
		if (!(val & BIT(fun_num)))
			goto err_unlock;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
			ret = -ETIMEDOUT;
			break;
		}
	}

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	struct qm_mailbox mailbox;
	u64 val = 0;
	int cnt = 0;
	int ret;
	u32 i;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
	mutex_lock(&qm->mailbox_lock);
	/* PF sends command to all VFs by mailbox */
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to VFs!\n");
		mutex_unlock(&qm->mailbox_lock);
		return ret;
	}

	qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If all VFs acked, PF notifies VFs successfully. */
		if (!(val & GENMASK(vfs_num, 1))) {
			mutex_unlock(&qm->mailbox_lock);
			return 0;
		}

		if (++cnt > QM_MAX_PF_WAIT_COUNT)
			break;
	}

	mutex_unlock(&qm->mailbox_lock);

	/* Check which VFs did not respond in time. */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			dev_err(dev, "failed to get response from VF(%u)!\n", i);
	}

	return -ETIMEDOUT;
}

static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
{
	struct qm_mailbox mailbox;
	int cnt = 0;
	u32 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
		goto unlock;
	}

	qm_trigger_pf_interrupt(qm);
	/* Waiting for PF response */
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readl(qm->io_base + QM_IFC_INT_SET_V);
		if (!(val & QM_IFC_INT_STATUS_MASK))
			break;

		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
			ret = -ETIMEDOUT;
			break;
		}
	}

unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_stop_qp(struct hisi_qp *qp)
{
	return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
}

static int qm_set_msi(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;

	if (set) {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       0);
	} else {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       ACC_PEH_MSI_DISABLE);
		if (qm->err_status.is_qm_ecc_mbit ||
		    qm->err_status.is_dev_ecc_mbit)
			return 0;

		mdelay(1);
		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
			return -EFAULT;
	}

	return 0;
}

static void qm_wait_msi_finish(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 cmd = ~0;
	int cnt = 0;
	u32 val;
	int ret;

	while (true) {
		pci_read_config_dword(pdev, pdev->msi_cap +
				      PCI_MSI_PENDING_64, &cmd);
		if (!cmd)
			break;

		if (++cnt > MAX_WAIT_COUNTS) {
			pci_warn(pdev, "failed to empty MSI PENDING!\n");
			break;
		}

		udelay(1);
	}

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
					 val, !(val & QM_PEH_DFX_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to empty PEH MSI!\n");

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
					 val, !(val & QM_PEH_MSI_FINISH_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to finish MSI operation!\n");
}

static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	int ret = -ETIMEDOUT;
	u32 cmd, i;

	pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
	if (set)
		cmd |= QM_MSI_CAP_ENABLE;
	else
		cmd &= ~QM_MSI_CAP_ENABLE;

	pci_write_config_dword(pdev, pdev->msi_cap, cmd);
	if (set) {
		for (i = 0; i < MAX_WAIT_COUNTS; i++) {
			pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
			if (cmd & QM_MSI_CAP_ENABLE)
				return 0;

			udelay(1);
		}
	} else {
		udelay(WAIT_PERIOD_US_MIN);
		qm_wait_msi_finish(qm);
		ret = 0;
	}

	return ret;
}

static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
	.qm_db = qm_db_v1,
	.hw_error_init = qm_hw_error_init_v1,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v2,
	.hw_error_uninit = qm_hw_error_uninit_v2,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v3,
	.hw_error_uninit = qm_hw_error_uninit_v3,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi_v3,
};

static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;

	if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
		return NULL;

	return qp->sqe + sq_tail * qp->qm->sqe_size;
}

static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
{
	u64 *addr;

	/* Use last 64 bits of DUS to reset status. */
	addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
	*addr = 0;
}

static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int qp_id;

	if (!qm_qp_avail_state(qm, NULL, QP_INIT))
		return ERR_PTR(-EPERM);

	if (qm->qp_in_used == qm->qp_num) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
	if (qp_id < 0) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp = &qm->qp_array[qp_id];
	hisi_qm_unset_hw_reset(qp);
	memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);

	qp->event_cb = NULL;
	qp->req_cb = NULL;
	qp->qp_id = qp_id;
	qp->alg_type = alg_type;
	qp->is_in_kernel = true;
	qm->qp_in_used++;
	atomic_set(&qp->qp_status.flags, QP_INIT);

	return qp;
}

/**
 * hisi_qm_create_qp() - Create a queue pair from qm.
 * @qm: The qm we create a qp from.
 * @alg_type: Accelerator specific algorithm type in sqc.
 *
 * Return created qp, -EBUSY if all qps in qm are allocated, -ENOMEM if
 * allocating qp memory fails.
 */
static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
	struct hisi_qp *qp;
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret)
		return ERR_PTR(ret);

	down_write(&qm->qps_lock);
	qp = qm_create_qp_nolock(qm, alg_type);
	up_write(&qm->qps_lock);

	if (IS_ERR(qp))
		qm_pm_put_sync(qm);

	return qp;
}

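/*
 * Usage sketch added for illustration only (not part of the driver): the
 * qp lifecycle implemented by the helpers in this file is
 * create -> start -> send -> stop -> release, roughly:
 *
 *	qp = hisi_qm_create_qp(qm, alg_type);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	ret = hisi_qm_start_qp(qp, 0);	// arg is the pasid, 0 for in-kernel qps
 *	ret = hisi_qp_send(qp, sqe);	// completion is reported via qp->req_cb()
 *	hisi_qm_stop_qp(qp);
 *	hisi_qm_release_qp(qp);
 *
 * Accelerator drivers normally obtain qps through higher-level helpers and
 * the uacce interface rather than calling these static functions directly.
 */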

/**
 * hisi_qm_release_qp() - Release a qp back to its qm.
 * @qp: The qp we want to release.
 *
 * This function releases the resource of a qp.
 */
static void hisi_qm_release_qp(struct hisi_qp *qp)
{
	struct hisi_qm *qm = qp->qm;

	down_write(&qm->qps_lock);

	if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
		up_write(&qm->qps_lock);
		return;
	}

	qm->qp_in_used--;
	idr_remove(&qm->qp_idr, qp->qp_id);

	up_write(&qm->qps_lock);

	qm_pm_put_sync(qm);
}

static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	enum qm_hw_ver ver = qm->ver;
	struct qm_sqc *sqc;
	dma_addr_t sqc_dma;
	int ret;

	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
	if (!sqc)
		return -ENOMEM;

	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
	if (ver == QM_HW_V1) {
		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
		sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
	} else {
		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
		sqc->w8 = 0; /* rand_qc */
	}
	sqc->cq_num = cpu_to_le16(qp_id);
	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
				       QM_QC_PASID_ENABLE_SHIFT);

	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sqc_dma)) {
		kfree(sqc);
		return -ENOMEM;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
	kfree(sqc);

	return ret;
}

static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	enum qm_hw_ver ver = qm->ver;
	struct qm_cqc *cqc;
	dma_addr_t cqc_dma;
	int ret;

	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
	if (!cqc)
		return -ENOMEM;

	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
	if (ver == QM_HW_V1) {
		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
							QM_QC_CQE_SIZE));
		cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
	} else {
		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
		cqc->w8 = 0; /* rand_qc */
	}
	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);

	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cqc_dma)) {
		kfree(cqc);
		return -ENOMEM;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
	kfree(cqc);

	return ret;
}

static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	int ret;

	qm_init_qp_status(qp);

	ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	return qm_cq_ctx_cfg(qp, qp_id, pasid);
}

static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	int qp_id = qp->qp_id;
	u32 pasid = arg;
	int ret;

	if (!qm_qp_avail_state(qm, qp, QP_START))
		return -EPERM;

	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
(ret) 2051 return ret; 2052 2053 atomic_set(&qp->qp_status.flags, QP_START); 2054 dev_dbg(dev, "queue %d started\n", qp_id); 2055 2056 return 0; 2057 } 2058 2059 /** 2060 * hisi_qm_start_qp() - Start a qp into running. 2061 * @qp: The qp we want to start to run. 2062 * @arg: Accelerator specific argument. 2063 * 2064 * After this function, qp can receive request from user. Return 0 if 2065 * successful, Return -EBUSY if failed. 2066 */ 2067 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) 2068 { 2069 struct hisi_qm *qm = qp->qm; 2070 int ret; 2071 2072 down_write(&qm->qps_lock); 2073 ret = qm_start_qp_nolock(qp, arg); 2074 up_write(&qm->qps_lock); 2075 2076 return ret; 2077 } 2078 EXPORT_SYMBOL_GPL(hisi_qm_start_qp); 2079 2080 /** 2081 * qp_stop_fail_cb() - call request cb. 2082 * @qp: stopped failed qp. 2083 * 2084 * Callback function should be called whether task completed or not. 2085 */ 2086 static void qp_stop_fail_cb(struct hisi_qp *qp) 2087 { 2088 int qp_used = atomic_read(&qp->qp_status.used); 2089 u16 cur_tail = qp->qp_status.sq_tail; 2090 u16 sq_depth = qp->sq_depth; 2091 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; 2092 struct hisi_qm *qm = qp->qm; 2093 u16 pos; 2094 int i; 2095 2096 for (i = 0; i < qp_used; i++) { 2097 pos = (i + cur_head) % sq_depth; 2098 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); 2099 atomic_dec(&qp->qp_status.used); 2100 } 2101 } 2102 2103 /** 2104 * qm_drain_qp() - Drain a qp. 2105 * @qp: The qp we want to drain. 2106 * 2107 * Determine whether the queue is cleared by judging the tail pointers of 2108 * sq and cq. 2109 */ 2110 static int qm_drain_qp(struct hisi_qp *qp) 2111 { 2112 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); 2113 struct hisi_qm *qm = qp->qm; 2114 struct device *dev = &qm->pdev->dev; 2115 struct qm_sqc *sqc; 2116 struct qm_cqc *cqc; 2117 dma_addr_t dma_addr; 2118 int ret = 0, i = 0; 2119 void *addr; 2120 2121 /* No need to judge if master OOO is blocked. 
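 * Once the device has reported an unrecoverable error the hardware will
 * not drain the queues anyway, so return 0 straight away.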
*/ 2122 if (qm_check_dev_error(qm)) 2123 return 0; 2124 2125 /* Kunpeng930 supports drain qp by device */ 2126 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { 2127 ret = qm_stop_qp(qp); 2128 if (ret) 2129 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); 2130 return ret; 2131 } 2132 2133 addr = hisi_qm_ctx_alloc(qm, size, &dma_addr); 2134 if (IS_ERR(addr)) { 2135 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); 2136 return -ENOMEM; 2137 } 2138 2139 while (++i) { 2140 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); 2141 if (ret) { 2142 dev_err_ratelimited(dev, "Failed to dump sqc!\n"); 2143 break; 2144 } 2145 sqc = addr; 2146 2147 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), 2148 qp->qp_id); 2149 if (ret) { 2150 dev_err_ratelimited(dev, "Failed to dump cqc!\n"); 2151 break; 2152 } 2153 cqc = addr + sizeof(struct qm_sqc); 2154 2155 if ((sqc->tail == cqc->tail) && 2156 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) 2157 break; 2158 2159 if (i == MAX_WAIT_COUNTS) { 2160 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); 2161 ret = -EBUSY; 2162 break; 2163 } 2164 2165 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); 2166 } 2167 2168 hisi_qm_ctx_free(qm, size, addr, &dma_addr); 2169 2170 return ret; 2171 } 2172 2173 static int qm_stop_qp_nolock(struct hisi_qp *qp) 2174 { 2175 struct device *dev = &qp->qm->pdev->dev; 2176 int ret; 2177 2178 /* 2179 * It is allowed to stop and release qp when reset, If the qp is 2180 * stopped when reset but still want to be released then, the 2181 * is_resetting flag should be set negative so that this qp will not 2182 * be restarted after reset. 2183 */ 2184 if (atomic_read(&qp->qp_status.flags) == QP_STOP) { 2185 qp->is_resetting = false; 2186 return 0; 2187 } 2188 2189 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) 2190 return -EPERM; 2191 2192 atomic_set(&qp->qp_status.flags, QP_STOP); 2193 2194 ret = qm_drain_qp(qp); 2195 if (ret) 2196 dev_err(dev, "Failed to drain out data for stopping!\n"); 2197 2198 2199 flush_workqueue(qp->qm->wq); 2200 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) 2201 qp_stop_fail_cb(qp); 2202 2203 dev_dbg(dev, "stop queue %u!", qp->qp_id); 2204 2205 return 0; 2206 } 2207 2208 /** 2209 * hisi_qm_stop_qp() - Stop a qp in qm. 2210 * @qp: The qp we want to stop. 2211 * 2212 * This function is reverse of hisi_qm_start_qp. Return 0 if successful. 2213 */ 2214 int hisi_qm_stop_qp(struct hisi_qp *qp) 2215 { 2216 int ret; 2217 2218 down_write(&qp->qm->qps_lock); 2219 ret = qm_stop_qp_nolock(qp); 2220 up_write(&qp->qm->qps_lock); 2221 2222 return ret; 2223 } 2224 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); 2225 2226 /** 2227 * hisi_qp_send() - Queue up a task in the hardware queue. 2228 * @qp: The qp in which to put the message. 2229 * @msg: The message. 2230 * 2231 * This function will return -EBUSY if qp is currently full, and -EAGAIN 2232 * if qp related qm is resetting. 2233 * 2234 * Note: This function may run with qm_irq_thread and ACC reset at same time. 2235 * It has no race with qm_irq_thread. However, during hisi_qp_send, ACC 2236 * reset may happen, we have no lock here considering performance. This 2237 * causes current qm_db sending fail or can not receive sended sqe. QM 2238 * sync/async receive function should handle the error sqe. ACC reset 2239 * done function should clear used sqe to 0. 
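 *
 * A minimal caller sketch (illustrative only; "struct my_sqe" and
 * my_fill_sqe() are hypothetical stand-ins for the accelerator specific
 * SQE of qm->sqe_size bytes and the code that fills it):
 *
 *	struct my_sqe sqe = {};
 *	int ret;
 *
 *	my_fill_sqe(&sqe, req);
 *	ret = hisi_qp_send(qp, &sqe);
 *	if (ret == -EBUSY || ret == -EAGAIN)
 *		return ret;
 *
 * Completions are then delivered through the qp->req_cb callback.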
2240 */ 2241 int hisi_qp_send(struct hisi_qp *qp, const void *msg) 2242 { 2243 struct hisi_qp_status *qp_status = &qp->qp_status; 2244 u16 sq_tail = qp_status->sq_tail; 2245 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; 2246 void *sqe = qm_get_avail_sqe(qp); 2247 2248 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || 2249 atomic_read(&qp->qm->status.flags) == QM_STOP || 2250 qp->is_resetting)) { 2251 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); 2252 return -EAGAIN; 2253 } 2254 2255 if (!sqe) 2256 return -EBUSY; 2257 2258 memcpy(sqe, msg, qp->qm->sqe_size); 2259 2260 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); 2261 atomic_inc(&qp->qp_status.used); 2262 qp_status->sq_tail = sq_tail_next; 2263 2264 return 0; 2265 } 2266 EXPORT_SYMBOL_GPL(hisi_qp_send); 2267 2268 static void hisi_qm_cache_wb(struct hisi_qm *qm) 2269 { 2270 unsigned int val; 2271 2272 if (qm->ver == QM_HW_V1) 2273 return; 2274 2275 writel(0x1, qm->io_base + QM_CACHE_WB_START); 2276 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, 2277 val, val & BIT(0), POLL_PERIOD, 2278 POLL_TIMEOUT)) 2279 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); 2280 } 2281 2282 static void qm_qp_event_notifier(struct hisi_qp *qp) 2283 { 2284 wake_up_interruptible(&qp->uacce_q->wait); 2285 } 2286 2287 /* This function returns free number of qp in qm. */ 2288 static int hisi_qm_get_available_instances(struct uacce_device *uacce) 2289 { 2290 struct hisi_qm *qm = uacce->priv; 2291 int ret; 2292 2293 down_read(&qm->qps_lock); 2294 ret = qm->qp_num - qm->qp_in_used; 2295 up_read(&qm->qps_lock); 2296 2297 return ret; 2298 } 2299 2300 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) 2301 { 2302 int i; 2303 2304 for (i = 0; i < qm->qp_num; i++) 2305 qm_set_qp_disable(&qm->qp_array[i], offset); 2306 } 2307 2308 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, 2309 unsigned long arg, 2310 struct uacce_queue *q) 2311 { 2312 struct hisi_qm *qm = uacce->priv; 2313 struct hisi_qp *qp; 2314 u8 alg_type = 0; 2315 2316 qp = hisi_qm_create_qp(qm, alg_type); 2317 if (IS_ERR(qp)) 2318 return PTR_ERR(qp); 2319 2320 q->priv = qp; 2321 q->uacce = uacce; 2322 qp->uacce_q = q; 2323 qp->event_cb = qm_qp_event_notifier; 2324 qp->pasid = arg; 2325 qp->is_in_kernel = false; 2326 2327 return 0; 2328 } 2329 2330 static void hisi_qm_uacce_put_queue(struct uacce_queue *q) 2331 { 2332 struct hisi_qp *qp = q->priv; 2333 2334 hisi_qm_release_qp(qp); 2335 } 2336 2337 /* map sq/cq/doorbell to user space */ 2338 static int hisi_qm_uacce_mmap(struct uacce_queue *q, 2339 struct vm_area_struct *vma, 2340 struct uacce_qfile_region *qfr) 2341 { 2342 struct hisi_qp *qp = q->priv; 2343 struct hisi_qm *qm = qp->qm; 2344 resource_size_t phys_base = qm->db_phys_base + 2345 qp->qp_id * qm->db_interval; 2346 size_t sz = vma->vm_end - vma->vm_start; 2347 struct pci_dev *pdev = qm->pdev; 2348 struct device *dev = &pdev->dev; 2349 unsigned long vm_pgoff; 2350 int ret; 2351 2352 switch (qfr->type) { 2353 case UACCE_QFRT_MMIO: 2354 if (qm->ver == QM_HW_V1) { 2355 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) 2356 return -EINVAL; 2357 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 2358 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + 2359 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) 2360 return -EINVAL; 2361 } else { 2362 if (sz > qm->db_interval) 2363 return -EINVAL; 2364 } 2365 2366 vma->vm_flags |= VM_IO; 2367 2368 return remap_pfn_range(vma, vma->vm_start, 2369 phys_base >> PAGE_SHIFT, 2370 
sz, pgprot_noncached(vma->vm_page_prot)); 2371 case UACCE_QFRT_DUS: 2372 if (sz != qp->qdma.size) 2373 return -EINVAL; 2374 2375 /* 2376 * dma_mmap_coherent() requires vm_pgoff as 0 2377 * restore vm_pfoff to initial value for mmap() 2378 */ 2379 vm_pgoff = vma->vm_pgoff; 2380 vma->vm_pgoff = 0; 2381 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, 2382 qp->qdma.dma, sz); 2383 vma->vm_pgoff = vm_pgoff; 2384 return ret; 2385 2386 default: 2387 return -EINVAL; 2388 } 2389 } 2390 2391 static int hisi_qm_uacce_start_queue(struct uacce_queue *q) 2392 { 2393 struct hisi_qp *qp = q->priv; 2394 2395 return hisi_qm_start_qp(qp, qp->pasid); 2396 } 2397 2398 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) 2399 { 2400 hisi_qm_stop_qp(q->priv); 2401 } 2402 2403 static int hisi_qm_is_q_updated(struct uacce_queue *q) 2404 { 2405 struct hisi_qp *qp = q->priv; 2406 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 2407 int updated = 0; 2408 2409 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 2410 /* make sure to read data from memory */ 2411 dma_rmb(); 2412 qm_cq_head_update(qp); 2413 cqe = qp->cqe + qp->qp_status.cq_head; 2414 updated = 1; 2415 } 2416 2417 return updated; 2418 } 2419 2420 static void qm_set_sqctype(struct uacce_queue *q, u16 type) 2421 { 2422 struct hisi_qm *qm = q->uacce->priv; 2423 struct hisi_qp *qp = q->priv; 2424 2425 down_write(&qm->qps_lock); 2426 qp->alg_type = type; 2427 up_write(&qm->qps_lock); 2428 } 2429 2430 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, 2431 unsigned long arg) 2432 { 2433 struct hisi_qp *qp = q->priv; 2434 struct hisi_qp_info qp_info; 2435 struct hisi_qp_ctx qp_ctx; 2436 2437 if (cmd == UACCE_CMD_QM_SET_QP_CTX) { 2438 if (copy_from_user(&qp_ctx, (void __user *)arg, 2439 sizeof(struct hisi_qp_ctx))) 2440 return -EFAULT; 2441 2442 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) 2443 return -EINVAL; 2444 2445 qm_set_sqctype(q, qp_ctx.qc_type); 2446 qp_ctx.id = qp->qp_id; 2447 2448 if (copy_to_user((void __user *)arg, &qp_ctx, 2449 sizeof(struct hisi_qp_ctx))) 2450 return -EFAULT; 2451 2452 return 0; 2453 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { 2454 if (copy_from_user(&qp_info, (void __user *)arg, 2455 sizeof(struct hisi_qp_info))) 2456 return -EFAULT; 2457 2458 qp_info.sqe_size = qp->qm->sqe_size; 2459 qp_info.sq_depth = qp->sq_depth; 2460 qp_info.cq_depth = qp->cq_depth; 2461 2462 if (copy_to_user((void __user *)arg, &qp_info, 2463 sizeof(struct hisi_qp_info))) 2464 return -EFAULT; 2465 2466 return 0; 2467 } 2468 2469 return -EINVAL; 2470 } 2471 2472 static const struct uacce_ops uacce_qm_ops = { 2473 .get_available_instances = hisi_qm_get_available_instances, 2474 .get_queue = hisi_qm_uacce_get_queue, 2475 .put_queue = hisi_qm_uacce_put_queue, 2476 .start_queue = hisi_qm_uacce_start_queue, 2477 .stop_queue = hisi_qm_uacce_stop_queue, 2478 .mmap = hisi_qm_uacce_mmap, 2479 .ioctl = hisi_qm_uacce_ioctl, 2480 .is_q_updated = hisi_qm_is_q_updated, 2481 }; 2482 2483 static int qm_alloc_uacce(struct hisi_qm *qm) 2484 { 2485 struct pci_dev *pdev = qm->pdev; 2486 struct uacce_device *uacce; 2487 unsigned long mmio_page_nr; 2488 unsigned long dus_page_nr; 2489 u16 sq_depth, cq_depth; 2490 struct uacce_interface interface = { 2491 .flags = UACCE_DEV_SVA, 2492 .ops = &uacce_qm_ops, 2493 }; 2494 int ret; 2495 2496 ret = strscpy(interface.name, dev_driver_string(&pdev->dev), 2497 sizeof(interface.name)); 2498 if (ret < 0) 2499 return -ENAMETOOLONG; 2500 2501 uacce = uacce_alloc(&pdev->dev, &interface); 2502 if 
(IS_ERR(uacce))
2503 		return PTR_ERR(uacce);
2504 
2505 	if (uacce->flags & UACCE_DEV_SVA) {
2506 		qm->use_sva = true;
2507 	} else {
2508 		/* only consider sva case */
2509 		uacce_remove(uacce);
2510 		qm->uacce = NULL;
2511 		return -EINVAL;
2512 	}
2513 
2514 	uacce->is_vf = pdev->is_virtfn;
2515 	uacce->priv = qm;
2516 
2517 	if (qm->ver == QM_HW_V1)
2518 		uacce->api_ver = HISI_QM_API_VER_BASE;
2519 	else if (qm->ver == QM_HW_V2)
2520 		uacce->api_ver = HISI_QM_API_VER2_BASE;
2521 	else
2522 		uacce->api_ver = HISI_QM_API_VER3_BASE;
2523 
2524 	if (qm->ver == QM_HW_V1)
2525 		mmio_page_nr = QM_DOORBELL_PAGE_NR;
2526 	else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2527 		mmio_page_nr = QM_DOORBELL_PAGE_NR +
2528 				QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
2529 	else
2530 		mmio_page_nr = qm->db_interval / PAGE_SIZE;
2531 
2532 	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
2533 
2534 	/* Add one more page for device or qp status */
2535 	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
2536 		       sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
2537 		       PAGE_SHIFT;
2538 
2539 	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
2540 	uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
2541 
2542 	qm->uacce = uacce;
2543 
2544 	return 0;
2545 }
2546 
2547 /**
2548  * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2549  * there is a user on the QM, return failure without doing anything.
2550  * @qm: The qm to be frozen.
2551  *
2552  * This function freezes the QM, then we can disable SRIOV.
2553  */
2554 static int qm_frozen(struct hisi_qm *qm)
2555 {
2556 	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
2557 		return 0;
2558 
2559 	down_write(&qm->qps_lock);
2560 
2561 	if (!qm->qp_in_used) {
2562 		qm->qp_in_used = qm->qp_num;
2563 		up_write(&qm->qps_lock);
2564 		set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
2565 		return 0;
2566 	}
2567 
2568 	up_write(&qm->qps_lock);
2569 
2570 	return -EBUSY;
2571 }
2572 
2573 static int qm_try_frozen_vfs(struct pci_dev *pdev,
2574 			     struct hisi_qm_list *qm_list)
2575 {
2576 	struct hisi_qm *qm, *vf_qm;
2577 	struct pci_dev *dev;
2578 	int ret = 0;
2579 
2580 	if (!qm_list || !pdev)
2581 		return -EINVAL;
2582 
2583 	/* Try to freeze all the VFs before disabling SRIOV */
2584 	mutex_lock(&qm_list->lock);
2585 	list_for_each_entry(qm, &qm_list->list, list) {
2586 		dev = qm->pdev;
2587 		if (dev == pdev)
2588 			continue;
2589 		if (pci_physfn(dev) == pdev) {
2590 			vf_qm = pci_get_drvdata(dev);
2591 			ret = qm_frozen(vf_qm);
2592 			if (ret)
2593 				goto frozen_fail;
2594 		}
2595 	}
2596 
2597 frozen_fail:
2598 	mutex_unlock(&qm_list->lock);
2599 
2600 	return ret;
2601 }
2602 
2603 /**
2604  * hisi_qm_wait_task_finish() - Wait until the task is finished
2605  * when removing the driver.
2606  * @qm: The qm needed to wait for the task to finish.
2607  * @qm_list: The list of all available devices.
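 *
 * This busy-waits (in WAIT_PERIOD steps) until no user holds a qp on
 * this function or on any of its VFs, and until any scheduled or
 * in-flight reset has finished, so the caller can safely tear the
 * device down.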
2608 */ 2609 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 2610 { 2611 while (qm_frozen(qm) || 2612 ((qm->fun_type == QM_HW_PF) && 2613 qm_try_frozen_vfs(qm->pdev, qm_list))) { 2614 msleep(WAIT_PERIOD); 2615 } 2616 2617 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || 2618 test_bit(QM_RESETTING, &qm->misc_ctl)) 2619 msleep(WAIT_PERIOD); 2620 2621 udelay(REMOVE_WAIT_DELAY); 2622 } 2623 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); 2624 2625 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) 2626 { 2627 struct device *dev = &qm->pdev->dev; 2628 struct qm_dma *qdma; 2629 int i; 2630 2631 for (i = num - 1; i >= 0; i--) { 2632 qdma = &qm->qp_array[i].qdma; 2633 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); 2634 kfree(qm->poll_data[i].qp_finish_id); 2635 } 2636 2637 kfree(qm->poll_data); 2638 kfree(qm->qp_array); 2639 } 2640 2641 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, 2642 u16 sq_depth, u16 cq_depth) 2643 { 2644 struct device *dev = &qm->pdev->dev; 2645 size_t off = qm->sqe_size * sq_depth; 2646 struct hisi_qp *qp; 2647 int ret = -ENOMEM; 2648 2649 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), 2650 GFP_KERNEL); 2651 if (!qm->poll_data[id].qp_finish_id) 2652 return -ENOMEM; 2653 2654 qp = &qm->qp_array[id]; 2655 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, 2656 GFP_KERNEL); 2657 if (!qp->qdma.va) 2658 goto err_free_qp_finish_id; 2659 2660 qp->sqe = qp->qdma.va; 2661 qp->sqe_dma = qp->qdma.dma; 2662 qp->cqe = qp->qdma.va + off; 2663 qp->cqe_dma = qp->qdma.dma + off; 2664 qp->qdma.size = dma_size; 2665 qp->sq_depth = sq_depth; 2666 qp->cq_depth = cq_depth; 2667 qp->qm = qm; 2668 qp->qp_id = id; 2669 2670 return 0; 2671 2672 err_free_qp_finish_id: 2673 kfree(qm->poll_data[id].qp_finish_id); 2674 return ret; 2675 } 2676 2677 static void hisi_qm_pre_init(struct hisi_qm *qm) 2678 { 2679 struct pci_dev *pdev = qm->pdev; 2680 2681 if (qm->ver == QM_HW_V1) 2682 qm->ops = &qm_hw_ops_v1; 2683 else if (qm->ver == QM_HW_V2) 2684 qm->ops = &qm_hw_ops_v2; 2685 else 2686 qm->ops = &qm_hw_ops_v3; 2687 2688 pci_set_drvdata(pdev, qm); 2689 mutex_init(&qm->mailbox_lock); 2690 init_rwsem(&qm->qps_lock); 2691 qm->qp_in_used = 0; 2692 qm->misc_ctl = false; 2693 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { 2694 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) 2695 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); 2696 } 2697 } 2698 2699 static void qm_cmd_uninit(struct hisi_qm *qm) 2700 { 2701 u32 val; 2702 2703 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2704 return; 2705 2706 val = readl(qm->io_base + QM_IFC_INT_MASK); 2707 val |= QM_IFC_INT_DISABLE; 2708 writel(val, qm->io_base + QM_IFC_INT_MASK); 2709 } 2710 2711 static void qm_cmd_init(struct hisi_qm *qm) 2712 { 2713 u32 val; 2714 2715 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2716 return; 2717 2718 /* Clear communication interrupt source */ 2719 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); 2720 2721 /* Enable pf to vf communication reg. 
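 * Clearing QM_IFC_INT_DISABLE in QM_IFC_INT_MASK unmasks the
 * inter-function interrupt so mailbox commands from the peer function
 * can be received.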
*/ 2722 val = readl(qm->io_base + QM_IFC_INT_MASK); 2723 val &= ~QM_IFC_INT_DISABLE; 2724 writel(val, qm->io_base + QM_IFC_INT_MASK); 2725 } 2726 2727 static void qm_put_pci_res(struct hisi_qm *qm) 2728 { 2729 struct pci_dev *pdev = qm->pdev; 2730 2731 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 2732 iounmap(qm->db_io_base); 2733 2734 iounmap(qm->io_base); 2735 pci_release_mem_regions(pdev); 2736 } 2737 2738 static void hisi_qm_pci_uninit(struct hisi_qm *qm) 2739 { 2740 struct pci_dev *pdev = qm->pdev; 2741 2742 pci_free_irq_vectors(pdev); 2743 qm_put_pci_res(qm); 2744 pci_disable_device(pdev); 2745 } 2746 2747 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) 2748 { 2749 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) 2750 writel(state, qm->io_base + QM_VF_STATE); 2751 } 2752 2753 static void hisi_qm_unint_work(struct hisi_qm *qm) 2754 { 2755 destroy_workqueue(qm->wq); 2756 } 2757 2758 static void hisi_qm_memory_uninit(struct hisi_qm *qm) 2759 { 2760 struct device *dev = &qm->pdev->dev; 2761 2762 hisi_qp_memory_uninit(qm, qm->qp_num); 2763 if (qm->qdma.va) { 2764 hisi_qm_cache_wb(qm); 2765 dma_free_coherent(dev, qm->qdma.size, 2766 qm->qdma.va, qm->qdma.dma); 2767 } 2768 2769 idr_destroy(&qm->qp_idr); 2770 2771 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 2772 kfree(qm->factor); 2773 } 2774 2775 /** 2776 * hisi_qm_uninit() - Uninitialize qm. 2777 * @qm: The qm needed uninit. 2778 * 2779 * This function uninits qm related device resources. 2780 */ 2781 void hisi_qm_uninit(struct hisi_qm *qm) 2782 { 2783 qm_cmd_uninit(qm); 2784 hisi_qm_unint_work(qm); 2785 down_write(&qm->qps_lock); 2786 2787 if (!qm_avail_state(qm, QM_CLOSE)) { 2788 up_write(&qm->qps_lock); 2789 return; 2790 } 2791 2792 hisi_qm_memory_uninit(qm); 2793 hisi_qm_set_state(qm, QM_NOT_READY); 2794 up_write(&qm->qps_lock); 2795 2796 qm_irqs_unregister(qm); 2797 hisi_qm_pci_uninit(qm); 2798 if (qm->use_sva) { 2799 uacce_remove(qm->uacce); 2800 qm->uacce = NULL; 2801 } 2802 } 2803 EXPORT_SYMBOL_GPL(hisi_qm_uninit); 2804 2805 /** 2806 * hisi_qm_get_vft() - Get vft from a qm. 2807 * @qm: The qm we want to get its vft. 2808 * @base: The base number of queue in vft. 2809 * @number: The number of queues in vft. 2810 * 2811 * We can allocate multiple queues to a qm by configuring virtual function 2812 * table. We get related configures by this function. Normally, we call this 2813 * function in VF driver to get the queue information. 2814 * 2815 * qm hw v1 does not support this interface. 2816 */ 2817 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) 2818 { 2819 if (!base || !number) 2820 return -EINVAL; 2821 2822 if (!qm->ops->get_vft) { 2823 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); 2824 return -EINVAL; 2825 } 2826 2827 return qm->ops->get_vft(qm, base, number); 2828 } 2829 2830 /** 2831 * hisi_qm_set_vft() - Set vft to a qm. 2832 * @qm: The qm we want to set its vft. 2833 * @fun_num: The function number. 2834 * @base: The base number of queue in vft. 2835 * @number: The number of queues in vft. 2836 * 2837 * This function is alway called in PF driver, it is used to assign queues 2838 * among PF and VFs. 
2839 * 2840 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) 2841 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) 2842 * (VF function number 0x2) 2843 */ 2844 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 2845 u32 number) 2846 { 2847 u32 max_q_num = qm->ctrl_qp_num; 2848 2849 if (base >= max_q_num || number > max_q_num || 2850 (base + number) > max_q_num) 2851 return -EINVAL; 2852 2853 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); 2854 } 2855 2856 static void qm_init_eq_aeq_status(struct hisi_qm *qm) 2857 { 2858 struct hisi_qm_status *status = &qm->status; 2859 2860 status->eq_head = 0; 2861 status->aeq_head = 0; 2862 status->eqc_phase = true; 2863 status->aeqc_phase = true; 2864 } 2865 2866 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) 2867 { 2868 /* Clear eq/aeq interrupt source */ 2869 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); 2870 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 2871 2872 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); 2873 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); 2874 } 2875 2876 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) 2877 { 2878 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); 2879 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); 2880 } 2881 2882 static int qm_eq_ctx_cfg(struct hisi_qm *qm) 2883 { 2884 struct device *dev = &qm->pdev->dev; 2885 struct qm_eqc *eqc; 2886 dma_addr_t eqc_dma; 2887 int ret; 2888 2889 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); 2890 if (!eqc) 2891 return -ENOMEM; 2892 2893 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); 2894 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); 2895 if (qm->ver == QM_HW_V1) 2896 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); 2897 eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 2898 2899 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), 2900 DMA_TO_DEVICE); 2901 if (dma_mapping_error(dev, eqc_dma)) { 2902 kfree(eqc); 2903 return -ENOMEM; 2904 } 2905 2906 ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); 2907 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); 2908 kfree(eqc); 2909 2910 return ret; 2911 } 2912 2913 static int qm_aeq_ctx_cfg(struct hisi_qm *qm) 2914 { 2915 struct device *dev = &qm->pdev->dev; 2916 struct qm_aeqc *aeqc; 2917 dma_addr_t aeqc_dma; 2918 int ret; 2919 2920 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL); 2921 if (!aeqc) 2922 return -ENOMEM; 2923 2924 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); 2925 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); 2926 aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 2927 2928 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), 2929 DMA_TO_DEVICE); 2930 if (dma_mapping_error(dev, aeqc_dma)) { 2931 kfree(aeqc); 2932 return -ENOMEM; 2933 } 2934 2935 ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); 2936 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); 2937 kfree(aeqc); 2938 2939 return ret; 2940 } 2941 2942 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) 2943 { 2944 struct device *dev = &qm->pdev->dev; 2945 int ret; 2946 2947 qm_init_eq_aeq_status(qm); 2948 2949 ret = qm_eq_ctx_cfg(qm); 2950 if (ret) { 2951 dev_err(dev, "Set eqc failed!\n"); 2952 return ret; 2953 } 2954 2955 return qm_aeq_ctx_cfg(qm); 2956 } 2957 2958 static int __hisi_qm_start(struct hisi_qm *qm) 2959 { 2960 int ret; 2961 2962 WARN_ON(!qm->qdma.va); 2963 2964 if (qm->fun_type == QM_HW_PF) { 2965 ret = 
hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); 2966 if (ret) 2967 return ret; 2968 } 2969 2970 ret = qm_eq_aeq_ctx_cfg(qm); 2971 if (ret) 2972 return ret; 2973 2974 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); 2975 if (ret) 2976 return ret; 2977 2978 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); 2979 if (ret) 2980 return ret; 2981 2982 qm_init_prefetch(qm); 2983 qm_enable_eq_aeq_interrupts(qm); 2984 2985 return 0; 2986 } 2987 2988 /** 2989 * hisi_qm_start() - start qm 2990 * @qm: The qm to be started. 2991 * 2992 * This function starts a qm, then we can allocate qp from this qm. 2993 */ 2994 int hisi_qm_start(struct hisi_qm *qm) 2995 { 2996 struct device *dev = &qm->pdev->dev; 2997 int ret = 0; 2998 2999 down_write(&qm->qps_lock); 3000 3001 if (!qm_avail_state(qm, QM_START)) { 3002 up_write(&qm->qps_lock); 3003 return -EPERM; 3004 } 3005 3006 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); 3007 3008 if (!qm->qp_num) { 3009 dev_err(dev, "qp_num should not be 0\n"); 3010 ret = -EINVAL; 3011 goto err_unlock; 3012 } 3013 3014 ret = __hisi_qm_start(qm); 3015 if (!ret) 3016 atomic_set(&qm->status.flags, QM_START); 3017 3018 hisi_qm_set_state(qm, QM_READY); 3019 err_unlock: 3020 up_write(&qm->qps_lock); 3021 return ret; 3022 } 3023 EXPORT_SYMBOL_GPL(hisi_qm_start); 3024 3025 static int qm_restart(struct hisi_qm *qm) 3026 { 3027 struct device *dev = &qm->pdev->dev; 3028 struct hisi_qp *qp; 3029 int ret, i; 3030 3031 ret = hisi_qm_start(qm); 3032 if (ret < 0) 3033 return ret; 3034 3035 down_write(&qm->qps_lock); 3036 for (i = 0; i < qm->qp_num; i++) { 3037 qp = &qm->qp_array[i]; 3038 if (atomic_read(&qp->qp_status.flags) == QP_STOP && 3039 qp->is_resetting == true) { 3040 ret = qm_start_qp_nolock(qp, 0); 3041 if (ret < 0) { 3042 dev_err(dev, "Failed to start qp%d!\n", i); 3043 3044 up_write(&qm->qps_lock); 3045 return ret; 3046 } 3047 qp->is_resetting = false; 3048 } 3049 } 3050 up_write(&qm->qps_lock); 3051 3052 return 0; 3053 } 3054 3055 /* Stop started qps in reset flow */ 3056 static int qm_stop_started_qp(struct hisi_qm *qm) 3057 { 3058 struct device *dev = &qm->pdev->dev; 3059 struct hisi_qp *qp; 3060 int i, ret; 3061 3062 for (i = 0; i < qm->qp_num; i++) { 3063 qp = &qm->qp_array[i]; 3064 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { 3065 qp->is_resetting = true; 3066 ret = qm_stop_qp_nolock(qp); 3067 if (ret < 0) { 3068 dev_err(dev, "Failed to stop qp%d!\n", i); 3069 return ret; 3070 } 3071 } 3072 } 3073 3074 return 0; 3075 } 3076 3077 3078 /** 3079 * qm_clear_queues() - Clear all queues memory in a qm. 3080 * @qm: The qm in which the queues will be cleared. 3081 * 3082 * This function clears all queues memory in a qm. Reset of accelerator can 3083 * use this to clear queues. 3084 */ 3085 static void qm_clear_queues(struct hisi_qm *qm) 3086 { 3087 struct hisi_qp *qp; 3088 int i; 3089 3090 for (i = 0; i < qm->qp_num; i++) { 3091 qp = &qm->qp_array[i]; 3092 if (qp->is_in_kernel && qp->is_resetting) 3093 memset(qp->qdma.va, 0, qp->qdma.size); 3094 } 3095 3096 memset(qm->qdma.va, 0, qm->qdma.size); 3097 } 3098 3099 /** 3100 * hisi_qm_stop() - Stop a qm. 3101 * @qm: The qm which will be stopped. 3102 * @r: The reason to stop qm. 3103 * 3104 * This function stops qm and its qps, then qm can not accept request. 3105 * Related resources are not released at this state, we can use hisi_qm_start 3106 * to let qm start again. 
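 *
 * When @r is QM_SOFT_RESET or QM_FLR, the reset flag kept in each qp's
 * DUS area is set before and after the queues are stopped so that the
 * queue user can tell the device was reset underneath it.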
3107 */ 3108 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) 3109 { 3110 struct device *dev = &qm->pdev->dev; 3111 int ret = 0; 3112 3113 down_write(&qm->qps_lock); 3114 3115 qm->status.stop_reason = r; 3116 if (!qm_avail_state(qm, QM_STOP)) { 3117 ret = -EPERM; 3118 goto err_unlock; 3119 } 3120 3121 if (qm->status.stop_reason == QM_SOFT_RESET || 3122 qm->status.stop_reason == QM_FLR) { 3123 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 3124 ret = qm_stop_started_qp(qm); 3125 if (ret < 0) { 3126 dev_err(dev, "Failed to stop started qp!\n"); 3127 goto err_unlock; 3128 } 3129 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 3130 } 3131 3132 qm_disable_eq_aeq_interrupts(qm); 3133 if (qm->fun_type == QM_HW_PF) { 3134 ret = hisi_qm_set_vft(qm, 0, 0, 0); 3135 if (ret < 0) { 3136 dev_err(dev, "Failed to set vft!\n"); 3137 ret = -EBUSY; 3138 goto err_unlock; 3139 } 3140 } 3141 3142 qm_clear_queues(qm); 3143 atomic_set(&qm->status.flags, QM_STOP); 3144 3145 err_unlock: 3146 up_write(&qm->qps_lock); 3147 return ret; 3148 } 3149 EXPORT_SYMBOL_GPL(hisi_qm_stop); 3150 3151 static void qm_hw_error_init(struct hisi_qm *qm) 3152 { 3153 if (!qm->ops->hw_error_init) { 3154 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); 3155 return; 3156 } 3157 3158 qm->ops->hw_error_init(qm); 3159 } 3160 3161 static void qm_hw_error_uninit(struct hisi_qm *qm) 3162 { 3163 if (!qm->ops->hw_error_uninit) { 3164 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); 3165 return; 3166 } 3167 3168 qm->ops->hw_error_uninit(qm); 3169 } 3170 3171 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) 3172 { 3173 if (!qm->ops->hw_error_handle) { 3174 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); 3175 return ACC_ERR_NONE; 3176 } 3177 3178 return qm->ops->hw_error_handle(qm); 3179 } 3180 3181 /** 3182 * hisi_qm_dev_err_init() - Initialize device error configuration. 3183 * @qm: The qm for which we want to do error initialization. 3184 * 3185 * Initialize QM and device error related configuration. 3186 */ 3187 void hisi_qm_dev_err_init(struct hisi_qm *qm) 3188 { 3189 if (qm->fun_type == QM_HW_VF) 3190 return; 3191 3192 qm_hw_error_init(qm); 3193 3194 if (!qm->err_ini->hw_err_enable) { 3195 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); 3196 return; 3197 } 3198 qm->err_ini->hw_err_enable(qm); 3199 } 3200 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); 3201 3202 /** 3203 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. 3204 * @qm: The qm for which we want to do error uninitialization. 3205 * 3206 * Uninitialize QM and device error related configuration. 3207 */ 3208 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) 3209 { 3210 if (qm->fun_type == QM_HW_VF) 3211 return; 3212 3213 qm_hw_error_uninit(qm); 3214 3215 if (!qm->err_ini->hw_err_disable) { 3216 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); 3217 return; 3218 } 3219 qm->err_ini->hw_err_disable(qm); 3220 } 3221 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); 3222 3223 /** 3224 * hisi_qm_free_qps() - free multiple queue pairs. 3225 * @qps: The queue pairs need to be freed. 3226 * @qp_num: The num of queue pairs. 
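 *
 * Typically paired with hisi_qm_alloc_qps_node(), e.g. (illustrative
 * sketch, where "my_qm_list" stands for the driver's hisi_qm_list):
 *
 *	struct hisi_qp *qps[2];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(&my_qm_list, 2, 0, numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, 2);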
3227 */ 3228 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) 3229 { 3230 int i; 3231 3232 if (!qps || qp_num <= 0) 3233 return; 3234 3235 for (i = qp_num - 1; i >= 0; i--) 3236 hisi_qm_release_qp(qps[i]); 3237 } 3238 EXPORT_SYMBOL_GPL(hisi_qm_free_qps); 3239 3240 static void free_list(struct list_head *head) 3241 { 3242 struct hisi_qm_resource *res, *tmp; 3243 3244 list_for_each_entry_safe(res, tmp, head, list) { 3245 list_del(&res->list); 3246 kfree(res); 3247 } 3248 } 3249 3250 static int hisi_qm_sort_devices(int node, struct list_head *head, 3251 struct hisi_qm_list *qm_list) 3252 { 3253 struct hisi_qm_resource *res, *tmp; 3254 struct hisi_qm *qm; 3255 struct list_head *n; 3256 struct device *dev; 3257 int dev_node; 3258 3259 list_for_each_entry(qm, &qm_list->list, list) { 3260 dev = &qm->pdev->dev; 3261 3262 dev_node = dev_to_node(dev); 3263 if (dev_node < 0) 3264 dev_node = 0; 3265 3266 res = kzalloc(sizeof(*res), GFP_KERNEL); 3267 if (!res) 3268 return -ENOMEM; 3269 3270 res->qm = qm; 3271 res->distance = node_distance(dev_node, node); 3272 n = head; 3273 list_for_each_entry(tmp, head, list) { 3274 if (res->distance < tmp->distance) { 3275 n = &tmp->list; 3276 break; 3277 } 3278 } 3279 list_add_tail(&res->list, n); 3280 } 3281 3282 return 0; 3283 } 3284 3285 /** 3286 * hisi_qm_alloc_qps_node() - Create multiple queue pairs. 3287 * @qm_list: The list of all available devices. 3288 * @qp_num: The number of queue pairs need created. 3289 * @alg_type: The algorithm type. 3290 * @node: The numa node. 3291 * @qps: The queue pairs need created. 3292 * 3293 * This function will sort all available device according to numa distance. 3294 * Then try to create all queue pairs from one device, if all devices do 3295 * not meet the requirements will return error. 3296 */ 3297 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, 3298 u8 alg_type, int node, struct hisi_qp **qps) 3299 { 3300 struct hisi_qm_resource *tmp; 3301 int ret = -ENODEV; 3302 LIST_HEAD(head); 3303 int i; 3304 3305 if (!qps || !qm_list || qp_num <= 0) 3306 return -EINVAL; 3307 3308 mutex_lock(&qm_list->lock); 3309 if (hisi_qm_sort_devices(node, &head, qm_list)) { 3310 mutex_unlock(&qm_list->lock); 3311 goto err; 3312 } 3313 3314 list_for_each_entry(tmp, &head, list) { 3315 for (i = 0; i < qp_num; i++) { 3316 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); 3317 if (IS_ERR(qps[i])) { 3318 hisi_qm_free_qps(qps, i); 3319 break; 3320 } 3321 } 3322 3323 if (i == qp_num) { 3324 ret = 0; 3325 break; 3326 } 3327 } 3328 3329 mutex_unlock(&qm_list->lock); 3330 if (ret) 3331 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", 3332 node, alg_type, qp_num); 3333 3334 err: 3335 free_list(&head); 3336 return ret; 3337 } 3338 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); 3339 3340 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) 3341 { 3342 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; 3343 u32 max_qp_num = qm->max_qp_num; 3344 u32 q_base = qm->qp_num; 3345 int ret; 3346 3347 if (!num_vfs) 3348 return -EINVAL; 3349 3350 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; 3351 3352 /* If vfs_q_num is less than num_vfs, return error. */ 3353 if (vfs_q_num < num_vfs) 3354 return -EINVAL; 3355 3356 q_num = vfs_q_num / num_vfs; 3357 remain_q_num = vfs_q_num % num_vfs; 3358 3359 for (i = num_vfs; i > 0; i--) { 3360 /* 3361 * if q_num + remain_q_num > max_qp_num in last vf, divide the 3362 * remaining queues equally. 
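 * Otherwise (when it fits) the last VF, handled in the first loop
 * iteration (i == num_vfs), takes the whole remainder at once; in every
 * case act_q_num is clamped to max_qp_num below.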
3363 */ 3364 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { 3365 act_q_num = q_num + remain_q_num; 3366 remain_q_num = 0; 3367 } else if (remain_q_num > 0) { 3368 act_q_num = q_num + 1; 3369 remain_q_num--; 3370 } else { 3371 act_q_num = q_num; 3372 } 3373 3374 act_q_num = min_t(int, act_q_num, max_qp_num); 3375 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); 3376 if (ret) { 3377 for (j = num_vfs; j > i; j--) 3378 hisi_qm_set_vft(qm, j, 0, 0); 3379 return ret; 3380 } 3381 q_base += act_q_num; 3382 } 3383 3384 return 0; 3385 } 3386 3387 static int qm_clear_vft_config(struct hisi_qm *qm) 3388 { 3389 int ret; 3390 u32 i; 3391 3392 for (i = 1; i <= qm->vfs_num; i++) { 3393 ret = hisi_qm_set_vft(qm, i, 0, 0); 3394 if (ret) 3395 return ret; 3396 } 3397 qm->vfs_num = 0; 3398 3399 return 0; 3400 } 3401 3402 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) 3403 { 3404 struct device *dev = &qm->pdev->dev; 3405 u32 ir = qos * QM_QOS_RATE; 3406 int ret, total_vfs, i; 3407 3408 total_vfs = pci_sriov_get_totalvfs(qm->pdev); 3409 if (fun_index > total_vfs) 3410 return -EINVAL; 3411 3412 qm->factor[fun_index].func_qos = qos; 3413 3414 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); 3415 if (ret) { 3416 dev_err(dev, "failed to calculate shaper parameter!\n"); 3417 return -EINVAL; 3418 } 3419 3420 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 3421 /* The base number of queue reuse for different alg type */ 3422 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); 3423 if (ret) { 3424 dev_err(dev, "type: %d, failed to set shaper vft!\n", i); 3425 return -EINVAL; 3426 } 3427 } 3428 3429 return 0; 3430 } 3431 3432 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) 3433 { 3434 u64 cir_u = 0, cir_b = 0, cir_s = 0; 3435 u64 shaper_vft, ir_calc, ir; 3436 unsigned int val; 3437 u32 error_rate; 3438 int ret; 3439 3440 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3441 val & BIT(0), POLL_PERIOD, 3442 POLL_TIMEOUT); 3443 if (ret) 3444 return 0; 3445 3446 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); 3447 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); 3448 writel(fun_index, qm->io_base + QM_VFT_CFG); 3449 3450 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 3451 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 3452 3453 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3454 val & BIT(0), POLL_PERIOD, 3455 POLL_TIMEOUT); 3456 if (ret) 3457 return 0; 3458 3459 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | 3460 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); 3461 3462 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; 3463 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; 3464 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; 3465 3466 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; 3467 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; 3468 3469 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 3470 3471 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; 3472 3473 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 3474 if (error_rate > QM_QOS_MIN_ERROR_RATE) { 3475 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); 3476 return 0; 3477 } 3478 3479 return ir; 3480 } 3481 3482 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) 3483 { 3484 struct device *dev = &qm->pdev->dev; 3485 u64 mb_cmd; 3486 u32 qos; 3487 int ret; 3488 3489 qos = qm_get_shaper_vft_qos(qm, fun_num); 3490 if (!qos) { 3491 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); 3492 return; 3493 } 3494 
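	/* Pack the qos value into the data field of the mailbox command and send it to the VF. */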
3495 mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; 3496 ret = qm_ping_single_vf(qm, mb_cmd, fun_num); 3497 if (ret) 3498 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); 3499 } 3500 3501 static int qm_vf_read_qos(struct hisi_qm *qm) 3502 { 3503 int cnt = 0; 3504 int ret = -EINVAL; 3505 3506 /* reset mailbox qos val */ 3507 qm->mb_qos = 0; 3508 3509 /* vf ping pf to get function qos */ 3510 ret = qm_ping_pf(qm, QM_VF_GET_QOS); 3511 if (ret) { 3512 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 3513 return ret; 3514 } 3515 3516 while (true) { 3517 msleep(QM_WAIT_DST_ACK); 3518 if (qm->mb_qos) 3519 break; 3520 3521 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 3522 pci_err(qm->pdev, "PF ping VF timeout!\n"); 3523 return -ETIMEDOUT; 3524 } 3525 } 3526 3527 return ret; 3528 } 3529 3530 static ssize_t qm_algqos_read(struct file *filp, char __user *buf, 3531 size_t count, loff_t *pos) 3532 { 3533 struct hisi_qm *qm = filp->private_data; 3534 char tbuf[QM_DBG_READ_LEN]; 3535 u32 qos_val, ir; 3536 int ret; 3537 3538 ret = hisi_qm_get_dfx_access(qm); 3539 if (ret) 3540 return ret; 3541 3542 /* Mailbox and reset cannot be operated at the same time */ 3543 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 3544 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); 3545 ret = -EAGAIN; 3546 goto err_put_dfx_access; 3547 } 3548 3549 if (qm->fun_type == QM_HW_PF) { 3550 ir = qm_get_shaper_vft_qos(qm, 0); 3551 } else { 3552 ret = qm_vf_read_qos(qm); 3553 if (ret) 3554 goto err_get_status; 3555 ir = qm->mb_qos; 3556 } 3557 3558 qos_val = ir / QM_QOS_RATE; 3559 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); 3560 3561 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); 3562 3563 err_get_status: 3564 clear_bit(QM_RESETTING, &qm->misc_ctl); 3565 err_put_dfx_access: 3566 hisi_qm_put_dfx_access(qm); 3567 return ret; 3568 } 3569 3570 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, 3571 unsigned long *val, 3572 unsigned int *fun_index) 3573 { 3574 struct bus_type *bus_type = qm->pdev->dev.bus; 3575 char tbuf_bdf[QM_DBG_READ_LEN] = {0}; 3576 char val_buf[QM_DBG_READ_LEN] = {0}; 3577 struct pci_dev *pdev; 3578 struct device *dev; 3579 int ret; 3580 3581 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); 3582 if (ret != QM_QOS_PARAM_NUM) 3583 return -EINVAL; 3584 3585 ret = kstrtoul(val_buf, 10, val); 3586 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { 3587 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); 3588 return -EINVAL; 3589 } 3590 3591 dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); 3592 if (!dev) { 3593 pci_err(qm->pdev, "input pci bdf number is error!\n"); 3594 return -ENODEV; 3595 } 3596 3597 pdev = container_of(dev, struct pci_dev, dev); 3598 3599 *fun_index = pdev->devfn; 3600 3601 return 0; 3602 } 3603 3604 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, 3605 size_t count, loff_t *pos) 3606 { 3607 struct hisi_qm *qm = filp->private_data; 3608 char tbuf[QM_DBG_READ_LEN]; 3609 unsigned int fun_index; 3610 unsigned long val; 3611 int len, ret; 3612 3613 if (*pos != 0) 3614 return 0; 3615 3616 if (count >= QM_DBG_READ_LEN) 3617 return -ENOSPC; 3618 3619 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); 3620 if (len < 0) 3621 return len; 3622 3623 tbuf[len] = '\0'; 3624 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); 3625 if (ret) 3626 return ret; 3627 3628 /* Mailbox and reset cannot be operated at the same time */ 3629 if (test_and_set_bit(QM_RESETTING, 
&qm->misc_ctl)) { 3630 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); 3631 return -EAGAIN; 3632 } 3633 3634 ret = qm_pm_get_sync(qm); 3635 if (ret) { 3636 ret = -EINVAL; 3637 goto err_get_status; 3638 } 3639 3640 ret = qm_func_shaper_enable(qm, fun_index, val); 3641 if (ret) { 3642 pci_err(qm->pdev, "failed to enable function shaper!\n"); 3643 ret = -EINVAL; 3644 goto err_put_sync; 3645 } 3646 3647 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", 3648 fun_index, val); 3649 ret = count; 3650 3651 err_put_sync: 3652 qm_pm_put_sync(qm); 3653 err_get_status: 3654 clear_bit(QM_RESETTING, &qm->misc_ctl); 3655 return ret; 3656 } 3657 3658 static const struct file_operations qm_algqos_fops = { 3659 .owner = THIS_MODULE, 3660 .open = simple_open, 3661 .read = qm_algqos_read, 3662 .write = qm_algqos_write, 3663 }; 3664 3665 /** 3666 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. 3667 * @qm: The qm for which we want to add debugfs files. 3668 * 3669 * Create function qos debugfs files, VF ping PF to get function qos. 3670 */ 3671 void hisi_qm_set_algqos_init(struct hisi_qm *qm) 3672 { 3673 if (qm->fun_type == QM_HW_PF) 3674 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, 3675 qm, &qm_algqos_fops); 3676 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3677 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, 3678 qm, &qm_algqos_fops); 3679 } 3680 3681 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) 3682 { 3683 int i; 3684 3685 for (i = 1; i <= total_func; i++) 3686 qm->factor[i].func_qos = QM_QOS_MAX_VAL; 3687 } 3688 3689 /** 3690 * hisi_qm_sriov_enable() - enable virtual functions 3691 * @pdev: the PCIe device 3692 * @max_vfs: the number of virtual functions to enable 3693 * 3694 * Returns the number of enabled VFs. If there are VFs enabled already or 3695 * max_vfs is more than the total number of device can be enabled, returns 3696 * failure. 3697 */ 3698 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) 3699 { 3700 struct hisi_qm *qm = pci_get_drvdata(pdev); 3701 int pre_existing_vfs, num_vfs, total_vfs, ret; 3702 3703 ret = qm_pm_get_sync(qm); 3704 if (ret) 3705 return ret; 3706 3707 total_vfs = pci_sriov_get_totalvfs(pdev); 3708 pre_existing_vfs = pci_num_vf(pdev); 3709 if (pre_existing_vfs) { 3710 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", 3711 pre_existing_vfs); 3712 goto err_put_sync; 3713 } 3714 3715 if (max_vfs > total_vfs) { 3716 pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs); 3717 ret = -ERANGE; 3718 goto err_put_sync; 3719 } 3720 3721 num_vfs = max_vfs; 3722 3723 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 3724 hisi_qm_init_vf_qos(qm, num_vfs); 3725 3726 ret = qm_vf_q_assign(qm, num_vfs); 3727 if (ret) { 3728 pci_err(pdev, "Can't assign queues for VF!\n"); 3729 goto err_put_sync; 3730 } 3731 3732 qm->vfs_num = num_vfs; 3733 3734 ret = pci_enable_sriov(pdev, num_vfs); 3735 if (ret) { 3736 pci_err(pdev, "Can't enable VF!\n"); 3737 qm_clear_vft_config(qm); 3738 goto err_put_sync; 3739 } 3740 3741 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); 3742 3743 return num_vfs; 3744 3745 err_put_sync: 3746 qm_pm_put_sync(qm); 3747 return ret; 3748 } 3749 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); 3750 3751 /** 3752 * hisi_qm_sriov_disable - disable virtual functions 3753 * @pdev: the PCI device. 3754 * @is_frozen: true when all the VFs are frozen. 3755 * 3756 * Return failure if there are VFs assigned already or VF is in used. 
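 *
 * On success the queues that were assigned to the VFs are returned to
 * the PF (their vft entries are cleared) and the runtime PM reference
 * taken in hisi_qm_sriov_enable() is dropped.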
3757 */ 3758 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) 3759 { 3760 struct hisi_qm *qm = pci_get_drvdata(pdev); 3761 int ret; 3762 3763 if (pci_vfs_assigned(pdev)) { 3764 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); 3765 return -EPERM; 3766 } 3767 3768 /* While VF is in used, SRIOV cannot be disabled. */ 3769 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { 3770 pci_err(pdev, "Task is using its VF!\n"); 3771 return -EBUSY; 3772 } 3773 3774 pci_disable_sriov(pdev); 3775 3776 ret = qm_clear_vft_config(qm); 3777 if (ret) 3778 return ret; 3779 3780 qm_pm_put_sync(qm); 3781 3782 return 0; 3783 } 3784 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); 3785 3786 /** 3787 * hisi_qm_sriov_configure - configure the number of VFs 3788 * @pdev: The PCI device 3789 * @num_vfs: The number of VFs need enabled 3790 * 3791 * Enable SR-IOV according to num_vfs, 0 means disable. 3792 */ 3793 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) 3794 { 3795 if (num_vfs == 0) 3796 return hisi_qm_sriov_disable(pdev, false); 3797 else 3798 return hisi_qm_sriov_enable(pdev, num_vfs); 3799 } 3800 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); 3801 3802 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) 3803 { 3804 u32 err_sts; 3805 3806 if (!qm->err_ini->get_dev_hw_err_status) { 3807 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); 3808 return ACC_ERR_NONE; 3809 } 3810 3811 /* get device hardware error status */ 3812 err_sts = qm->err_ini->get_dev_hw_err_status(qm); 3813 if (err_sts) { 3814 if (err_sts & qm->err_info.ecc_2bits_mask) 3815 qm->err_status.is_dev_ecc_mbit = true; 3816 3817 if (qm->err_ini->log_dev_hw_err) 3818 qm->err_ini->log_dev_hw_err(qm, err_sts); 3819 3820 if (err_sts & qm->err_info.dev_reset_mask) 3821 return ACC_ERR_NEED_RESET; 3822 3823 if (qm->err_ini->clear_dev_hw_err_status) 3824 qm->err_ini->clear_dev_hw_err_status(qm, err_sts); 3825 } 3826 3827 return ACC_ERR_RECOVERED; 3828 } 3829 3830 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) 3831 { 3832 enum acc_err_result qm_ret, dev_ret; 3833 3834 /* log qm error */ 3835 qm_ret = qm_hw_error_handle(qm); 3836 3837 /* log device error */ 3838 dev_ret = qm_dev_err_handle(qm); 3839 3840 return (qm_ret == ACC_ERR_NEED_RESET || 3841 dev_ret == ACC_ERR_NEED_RESET) ? 3842 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; 3843 } 3844 3845 /** 3846 * hisi_qm_dev_err_detected() - Get device and qm error status then log it. 3847 * @pdev: The PCI device which need report error. 3848 * @state: The connectivity between CPU and device. 3849 * 3850 * We register this function into PCIe AER handlers, It will report device or 3851 * qm hardware error status when error occur. 
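 *
 * Returns PCI_ERS_RESULT_NEED_RESET if a controller reset is required,
 * PCI_ERS_RESULT_DISCONNECT on permanent failure, and
 * PCI_ERS_RESULT_RECOVERED otherwise; for VFs it returns
 * PCI_ERS_RESULT_NONE.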
3852 */ 3853 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, 3854 pci_channel_state_t state) 3855 { 3856 struct hisi_qm *qm = pci_get_drvdata(pdev); 3857 enum acc_err_result ret; 3858 3859 if (pdev->is_virtfn) 3860 return PCI_ERS_RESULT_NONE; 3861 3862 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state); 3863 if (state == pci_channel_io_perm_failure) 3864 return PCI_ERS_RESULT_DISCONNECT; 3865 3866 ret = qm_process_dev_error(qm); 3867 if (ret == ACC_ERR_NEED_RESET) 3868 return PCI_ERS_RESULT_NEED_RESET; 3869 3870 return PCI_ERS_RESULT_RECOVERED; 3871 } 3872 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); 3873 3874 static int qm_check_req_recv(struct hisi_qm *qm) 3875 { 3876 struct pci_dev *pdev = qm->pdev; 3877 int ret; 3878 u32 val; 3879 3880 if (qm->ver >= QM_HW_V3) 3881 return 0; 3882 3883 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); 3884 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 3885 (val == ACC_VENDOR_ID_VALUE), 3886 POLL_PERIOD, POLL_TIMEOUT); 3887 if (ret) { 3888 dev_err(&pdev->dev, "Fails to read QM reg!\n"); 3889 return ret; 3890 } 3891 3892 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); 3893 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 3894 (val == PCI_VENDOR_ID_HUAWEI), 3895 POLL_PERIOD, POLL_TIMEOUT); 3896 if (ret) 3897 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); 3898 3899 return ret; 3900 } 3901 3902 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) 3903 { 3904 struct pci_dev *pdev = qm->pdev; 3905 u16 cmd; 3906 int i; 3907 3908 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 3909 if (set) 3910 cmd |= PCI_COMMAND_MEMORY; 3911 else 3912 cmd &= ~PCI_COMMAND_MEMORY; 3913 3914 pci_write_config_word(pdev, PCI_COMMAND, cmd); 3915 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 3916 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 3917 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) 3918 return 0; 3919 3920 udelay(1); 3921 } 3922 3923 return -ETIMEDOUT; 3924 } 3925 3926 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) 3927 { 3928 struct pci_dev *pdev = qm->pdev; 3929 u16 sriov_ctrl; 3930 int pos; 3931 int i; 3932 3933 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 3934 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 3935 if (set) 3936 sriov_ctrl |= PCI_SRIOV_CTRL_MSE; 3937 else 3938 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; 3939 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); 3940 3941 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 3942 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 3943 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> 3944 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) 3945 return 0; 3946 3947 udelay(1); 3948 } 3949 3950 return -ETIMEDOUT; 3951 } 3952 3953 static int qm_vf_reset_prepare(struct hisi_qm *qm, 3954 enum qm_stop_reason stop_reason) 3955 { 3956 struct hisi_qm_list *qm_list = qm->qm_list; 3957 struct pci_dev *pdev = qm->pdev; 3958 struct pci_dev *virtfn; 3959 struct hisi_qm *vf_qm; 3960 int ret = 0; 3961 3962 mutex_lock(&qm_list->lock); 3963 list_for_each_entry(vf_qm, &qm_list->list, list) { 3964 virtfn = vf_qm->pdev; 3965 if (virtfn == pdev) 3966 continue; 3967 3968 if (pci_physfn(virtfn) == pdev) { 3969 /* save VFs PCIE BAR configuration */ 3970 pci_save_state(virtfn); 3971 3972 ret = hisi_qm_stop(vf_qm, stop_reason); 3973 if (ret) 3974 goto stop_fail; 3975 } 3976 } 3977 3978 stop_fail: 3979 mutex_unlock(&qm_list->lock); 3980 return ret; 3981 } 3982 3983 static int qm_try_stop_vfs(struct hisi_qm *qm, 
u64 cmd, 3984 enum qm_stop_reason stop_reason) 3985 { 3986 struct pci_dev *pdev = qm->pdev; 3987 int ret; 3988 3989 if (!qm->vfs_num) 3990 return 0; 3991 3992 /* Kunpeng930 supports to notify VFs to stop before PF reset */ 3993 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 3994 ret = qm_ping_all_vfs(qm, cmd); 3995 if (ret) 3996 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); 3997 } else { 3998 ret = qm_vf_reset_prepare(qm, stop_reason); 3999 if (ret) 4000 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret); 4001 } 4002 4003 return ret; 4004 } 4005 4006 static int qm_controller_reset_prepare(struct hisi_qm *qm) 4007 { 4008 struct pci_dev *pdev = qm->pdev; 4009 int ret; 4010 4011 ret = qm_reset_prepare_ready(qm); 4012 if (ret) { 4013 pci_err(pdev, "Controller reset not ready!\n"); 4014 return ret; 4015 } 4016 4017 /* PF obtains the information of VF by querying the register. */ 4018 qm_cmd_uninit(qm); 4019 4020 /* Whether VFs stop successfully, soft reset will continue. */ 4021 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); 4022 if (ret) 4023 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); 4024 4025 ret = hisi_qm_stop(qm, QM_SOFT_RESET); 4026 if (ret) { 4027 pci_err(pdev, "Fails to stop QM!\n"); 4028 qm_reset_bit_clear(qm); 4029 return ret; 4030 } 4031 4032 ret = qm_wait_vf_prepare_finish(qm); 4033 if (ret) 4034 pci_err(pdev, "failed to stop by vfs in soft reset!\n"); 4035 4036 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4037 4038 return 0; 4039 } 4040 4041 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) 4042 { 4043 u32 nfe_enb = 0; 4044 4045 /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ 4046 if (qm->ver >= QM_HW_V3) 4047 return; 4048 4049 if (!qm->err_status.is_dev_ecc_mbit && 4050 qm->err_status.is_qm_ecc_mbit && 4051 qm->err_ini->close_axi_master_ooo) { 4052 4053 qm->err_ini->close_axi_master_ooo(qm); 4054 4055 } else if (qm->err_status.is_dev_ecc_mbit && 4056 !qm->err_status.is_qm_ecc_mbit && 4057 !qm->err_ini->close_axi_master_ooo) { 4058 4059 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); 4060 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, 4061 qm->io_base + QM_RAS_NFE_ENABLE); 4062 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); 4063 } 4064 } 4065 4066 static int qm_soft_reset(struct hisi_qm *qm) 4067 { 4068 struct pci_dev *pdev = qm->pdev; 4069 int ret; 4070 u32 val; 4071 4072 /* Ensure all doorbells and mailboxes received by QM */ 4073 ret = qm_check_req_recv(qm); 4074 if (ret) 4075 return ret; 4076 4077 if (qm->vfs_num) { 4078 ret = qm_set_vf_mse(qm, false); 4079 if (ret) { 4080 pci_err(pdev, "Fails to disable vf MSE bit.\n"); 4081 return ret; 4082 } 4083 } 4084 4085 ret = qm->ops->set_msi(qm, false); 4086 if (ret) { 4087 pci_err(pdev, "Fails to disable PEH MSI bit.\n"); 4088 return ret; 4089 } 4090 4091 qm_dev_ecc_mbit_handle(qm); 4092 4093 /* OOO register set and check */ 4094 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, 4095 qm->io_base + ACC_MASTER_GLOBAL_CTRL); 4096 4097 /* If bus lock, reset chip */ 4098 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, 4099 val, 4100 (val == ACC_MASTER_TRANS_RETURN_RW), 4101 POLL_PERIOD, POLL_TIMEOUT); 4102 if (ret) { 4103 pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); 4104 return ret; 4105 } 4106 4107 if (qm->err_ini->close_sva_prefetch) 4108 qm->err_ini->close_sva_prefetch(qm); 4109 4110 ret = qm_set_pf_mse(qm, false); 4111 if (ret) { 4112 pci_err(pdev, "Fails to disable pf MSE bit.\n"); 4113 return ret; 4114 } 4115 4116 /* The reset related sub-control registers are not in PCI BAR */ 4117 if (ACPI_HANDLE(&pdev->dev)) { 4118 unsigned long long value = 0; 4119 acpi_status s; 4120 4121 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), 4122 qm->err_info.acpi_rst, 4123 NULL, &value); 4124 if (ACPI_FAILURE(s)) { 4125 pci_err(pdev, "NO controller reset method!\n"); 4126 return -EIO; 4127 } 4128 4129 if (value) { 4130 pci_err(pdev, "Reset step %llu failed!\n", value); 4131 return -EIO; 4132 } 4133 } else { 4134 pci_err(pdev, "No reset method!\n"); 4135 return -EINVAL; 4136 } 4137 4138 return 0; 4139 } 4140 4141 static int qm_vf_reset_done(struct hisi_qm *qm) 4142 { 4143 struct hisi_qm_list *qm_list = qm->qm_list; 4144 struct pci_dev *pdev = qm->pdev; 4145 struct pci_dev *virtfn; 4146 struct hisi_qm *vf_qm; 4147 int ret = 0; 4148 4149 mutex_lock(&qm_list->lock); 4150 list_for_each_entry(vf_qm, &qm_list->list, list) { 4151 virtfn = vf_qm->pdev; 4152 if (virtfn == pdev) 4153 continue; 4154 4155 if (pci_physfn(virtfn) == pdev) { 4156 /* enable VFs PCIE BAR configuration */ 4157 pci_restore_state(virtfn); 4158 4159 ret = qm_restart(vf_qm); 4160 if (ret) 4161 goto restart_fail; 4162 } 4163 } 4164 4165 restart_fail: 4166 mutex_unlock(&qm_list->lock); 4167 return ret; 4168 } 4169 4170 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) 4171 { 4172 struct pci_dev *pdev = qm->pdev; 4173 int ret; 4174 4175 if (!qm->vfs_num) 4176 return 0; 4177 4178 ret = qm_vf_q_assign(qm, qm->vfs_num); 4179 if (ret) { 4180 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret); 4181 return ret; 4182 } 4183 4184 /* Kunpeng930 supports to notify VFs to start after PF reset. 
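 * via the PF-to-VF mailbox; on older hardware without mailbox support
 * the PF restarts each VF's qm directly through qm_vf_reset_done().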
	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
		ret = qm_ping_all_vfs(qm, cmd);
		if (ret)
			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
	} else {
		ret = qm_vf_reset_done(qm);
		if (ret)
			pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
	}

	return ret;
}

static int qm_dev_hw_init(struct hisi_qm *qm)
{
	return qm->err_ini->hw_init(qm);
}

static void qm_restart_prepare(struct hisi_qm *qm)
{
	u32 value;

	if (qm->err_ini->open_sva_prefetch)
		qm->err_ini->open_sva_prefetch(qm);

	if (qm->ver >= QM_HW_V3)
		return;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* temporarily close the OOO port used for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	writel(value & ~qm->err_info.msi_wr_port,
	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);

	/* clear dev ecc 2bit error source if present */
	value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
	if (value && qm->err_ini->clear_dev_hw_err_status)
		qm->err_ini->clear_dev_hw_err_status(qm, value);

	/* clear QM ecc mbit error source */
	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* clear AM Reorder Buffer ecc mbit source */
	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
}

static void qm_restart_done(struct hisi_qm *qm)
{
	u32 value;

	if (qm->ver >= QM_HW_V3)
		goto clear_flags;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* open the OOO port for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	value |= qm->err_info.msi_wr_port;
	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);

clear_flags:
	qm->err_status.is_qm_ecc_mbit = false;
	qm->err_status.is_dev_ecc_mbit = false;
}

static int qm_controller_reset_done(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "Fails to enable PEH MSI bit!\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "Fails to enable pf MSE bit!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, true);
		if (ret) {
			pci_err(pdev, "Fails to enable vf MSE bit!\n");
			return ret;
		}
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init device\n");
		return ret;
	}

	qm_restart_prepare(qm);
	hisi_qm_dev_err_init(qm);
	if (qm->err_ini->open_axi_master_ooo)
		qm->err_ini->open_axi_master_ooo(qm);

	ret = qm_dev_mem_reset(qm);
	if (ret) {
		pci_err(pdev, "failed to reset device memory\n");
		return ret;
	}

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM!\n");
		return ret;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in soft reset.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in soft reset!\n");

	qm_cmd_init(qm);
	qm_restart_done(qm);

	qm_reset_bit_clear(qm);

	return 0;
}
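
/*
 * The PF controller reset below runs in three phases:
 * qm_controller_reset_prepare() stops the PF and its VFs,
 * qm_soft_reset() disables MSI/MSE, shuts down the AXI master OOO path and
 * invokes the platform (ACPI) reset method, and
 * qm_controller_reset_done() re-enables MSI/MSE, re-initializes the device
 * and restarts the PF and its VFs.
 */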
static int qm_controller_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_info(pdev, "Controller resetting...\n");

	ret = qm_controller_reset_prepare(qm);
	if (ret) {
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return ret;
	}

	hisi_qm_show_last_dfx_regs(qm);
	if (qm->err_ini->show_last_dfx_regs)
		qm->err_ini->show_last_dfx_regs(qm);

	ret = qm_soft_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		qm_reset_bit_clear(qm);
		return ret;
	}

	ret = qm_controller_reset_done(qm);
	if (ret) {
		qm_reset_bit_clear(qm);
		return ret;
	}

	pci_info(pdev, "Controller reset complete\n");

	return 0;
}

/**
 * hisi_qm_dev_slot_reset() - slot reset
 * @pdev: the PCIe device
 *
 * This function offers the QM-related PCIe device reset interface. Drivers
 * which use the QM can use this function as slot_reset in their struct
 * pci_error_handlers.
 */
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);

void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

	/*
	 * Check whether there is an ECC mbit error. If it occurs, wait for
	 * the soft reset to fix it.
	 */
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	/* PF obtains the information of VF by querying the register. */
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_uninit(qm);

	ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
	if (ret)
		pci_err(pdev, "failed to stop vfs by pf in FLR.\n");

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		return;
	}

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to stop by vfs in FLR!\n");

	pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);

static bool qm_flr_reset_complete(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
	u32 id;

	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
	if (id == QM_PCI_COMMAND_INVALID) {
		pci_err(pdev, "Device can not be used!\n");
		return false;
	}

	return true;
}

void hisi_qm_reset_done(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_hw_init(qm);
		if (ret) {
			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
			goto flr_done;
		}
	}

	hisi_qm_dev_err_init(pf_qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
		goto flr_done;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in FLR.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in FLR!\n");

flr_done:
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_init(qm);

	if (qm_flr_reset_complete(pdev))
		pci_info(pdev, "FLR reset complete\n");

	qm_reset_bit_clear(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);

static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	enum acc_err_result ret;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET &&
	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}
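
/*
 * Illustrative sketch (the structure and driver names here are hypothetical,
 * not taken from an in-tree driver): an accelerator driver built on the QM
 * would typically hook the reset and shutdown callbacks exported above into
 * its PCI driver roughly as follows:
 *
 *	static const struct pci_error_handlers example_err_handler = {
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 *
 *	static struct pci_driver example_pci_driver = {
 *		...
 *		.err_handler	= &example_err_handler,
 *		.shutdown	= hisi_qm_dev_shutdown,
 *	};
 */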

/**
 * hisi_qm_dev_shutdown() - Shutdown device.
 * @pdev: The device to be shut down.
 *
 * This function stops the qm when the OS shuts down or reboots.
 */
void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret)
		dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);

static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret) {
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return;
	}

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret)
		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);

	qm_pm_put_sync(qm);
}

static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(&pdev->dev, "reset prepare not ready!\n");
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	}

	ret = hisi_qm_stop(qm, stop_reason);
	if (ret) {
		dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	} else {
		goto out;
	}

err_prepare:
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
	pci_save_state(pdev);
	ret = qm_ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
}

static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
	enum qm_mb_cmd cmd = QM_VF_START_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_restore_state(pdev);
	ret = hisi_qm_start(qm);
	if (ret) {
		dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
		cmd = QM_VF_START_FAIL;
	}

	qm_cmd_init(qm);
	ret = qm_ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");

	qm_reset_bit_clear(qm);
}

static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val, cmd;
	u64 msg;
	int ret;

	/* Wait for the reset to finish */
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
					 val == BIT(0), QM_VF_RESET_WAIT_US,
					 QM_VF_RESET_WAIT_TIMEOUT_US);
	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
		return -ETIMEDOUT;
	}

	/*
	 * Whether or not the message is obtained successfully, the VF needs
	 * to ack the PF by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, 0);
	qm_clear_cmd_interrupt(qm, 0);
	if (ret) {
		dev_err(dev, "failed to get msg from PF in reset done!\n");
		return ret;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	if (cmd != QM_PF_RESET_DONE) {
		dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static void qm_pf_reset_vf_process(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	dev_info(dev, "device reset start...\n");

	/* The message is obtained by querying the register during resetting */
	qm_cmd_uninit(qm);
	qm_pf_reset_vf_prepare(qm, stop_reason);

	ret = qm_wait_pf_reset_finish(qm);
	if (ret)
		goto err_get_status;

	qm_pf_reset_vf_done(qm);

	dev_info(dev, "device reset done.\n");

	return;

err_get_status:
	qm_cmd_init(qm);
	qm_reset_bit_clear(qm);
}

static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	u64 msg;
	u32 cmd;
	int ret;

	/*
	 * Get the msg from the source by sending a mailbox. Whether or not
	 * the message is obtained successfully, the destination needs to ack
	 * the source by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, fun_num);
	qm_clear_cmd_interrupt(qm, BIT(fun_num));
	if (ret) {
		dev_err(dev, "failed to get msg from source!\n");
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_PF_FLR_PREPARE:
		qm_pf_reset_vf_process(qm, QM_FLR);
		break;
	case QM_PF_SRST_PREPARE:
		qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
		break;
	case QM_VF_GET_QOS:
		qm_vf_get_qos(qm, fun_num);
		break;
	case QM_PF_SET_QOS:
		qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
		break;
	}
}

static void qm_cmd_process(struct work_struct *cmd_process)
{
	struct hisi_qm *qm = container_of(cmd_process,
					  struct hisi_qm, cmd_process);
	u32 vfs_num = qm->vfs_num;
	u64 val;
	u32 i;

	if (qm->fun_type == QM_HW_PF) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		if (!val)
			return;

		for (i = 1; i <= vfs_num; i++) {
			if (val & BIT(i))
				qm_handle_cmd_msg(qm, i);
		}

		return;
	}

	qm_handle_cmd_msg(qm, 0);
}
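
/*
 * Illustrative sketch of the expected use of the two helpers below (the
 * qm_list name is hypothetical): register the algorithms after the qm has
 * been started in probe(), and unregister them before stopping the qm in
 * remove():
 *
 *	ret = hisi_qm_alg_register(qm, &example_qm_list);
 *	...
 *	hisi_qm_alg_unregister(qm, &example_qm_list);
 */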

/**
 * hisi_qm_alg_register() - Register algorithms to crypto and add the qm to
 *			    the qm list.
 * @qm: The qm to add.
 * @qm_list: The qm list.
 *
 * This function adds the qm to the qm list, and will register algorithms
 * to crypto when the qm list is empty.
 */
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	struct device *dev = &qm->pdev->dev;
	int flag = 0;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	if (list_empty(&qm_list->list))
		flag = 1;
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva) {
		dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n");
		return 0;
	}

	if (flag) {
		ret = qm_list->register_to_crypto(qm);
		if (ret) {
			mutex_lock(&qm_list->lock);
			list_del(&qm->list);
			mutex_unlock(&qm_list->lock);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);

/**
 * hisi_qm_alg_unregister() - Unregister algorithms from crypto and delete
 *			      the qm from the qm list.
 * @qm: The qm to delete.
 * @qm_list: The qm list.
 *
 * This function deletes the qm from the qm list, and will unregister
 * algorithms from crypto when the qm list is empty.
 */
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva)
		return;

	if (list_empty(&qm_list->list))
		qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);

static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	if (qm->fun_type == QM_HW_VF)
		return;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_abnormal_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	if (qm->fun_type == QM_HW_VF)
		return 0;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_aeq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_aeq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
				   qm_aeq_thread, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_eq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_eq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);

	return ret;
}

static void qm_irqs_unregister(struct hisi_qm *qm)
{
	qm_unregister_mb_cmd_irq(qm);
	qm_unregister_abnormal_irq(qm);
	qm_unregister_aeq_irq(qm);
	qm_unregister_eq_irq(qm);
}

static int qm_irqs_register(struct hisi_qm *qm)
{
	int ret;

	ret = qm_register_eq_irq(qm);
	if (ret)
		return ret;

	ret = qm_register_aeq_irq(qm);
	if (ret)
		goto free_eq_irq;

	ret = qm_register_abnormal_irq(qm);
	if (ret)
		goto free_aeq_irq;

	ret = qm_register_mb_cmd_irq(qm);
	if (ret)
		goto free_abnormal_irq;

	return 0;

free_abnormal_irq:
	qm_unregister_abnormal_irq(qm);
free_aeq_irq:
	qm_unregister_aeq_irq(qm);
free_eq_irq:
	qm_unregister_eq_irq(qm);
	return ret;
}

static int qm_get_qp_num(struct hisi_qm *qm)
{
	bool is_db_isolation;

	/* VF's qp_num is assigned by the PF in v2, and the VF can get qp_num by vft. */
	if (qm->fun_type == QM_HW_VF) {
		if (qm->ver != QM_HW_V1)
			/* v2 starts to support getting vft by mailbox */
			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);

		return 0;
	}

	is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
	qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
	qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
					     QM_FUNC_MAX_QP_CAP, is_db_isolation);

	/* check if the qp number is valid */
	if (qm->qp_num > qm->max_qp_num) {
		dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
			qm->qp_num, qm->max_qp_num);
		return -EINVAL;
	}

	return 0;
}

static void qm_get_hw_caps(struct hisi_qm *qm)
{
	const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
						  qm_cap_info_pf : qm_cap_info_vf;
	u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
		   ARRAY_SIZE(qm_cap_info_vf);
	u32 val, i;

	/* The doorbell isolation register is an independent register. */
	val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
	if (val)
		set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);

	if (qm->ver >= QM_HW_V3) {
		val = readl(qm->io_base + QM_FUNC_CAPS_REG);
		qm->cap_ver = val & QM_CAPBILITY_VERSION;
	}

	/* Get the capabilities common to PF and VF */
	for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
		val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
		if (val)
			set_bit(qm_cap_info_comm[i].type, &qm->caps);
	}

	/* Get the capabilities that differ between PF and VF */
	for (i = 0; i < size; i++) {
		val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
		if (val)
			set_bit(cap_info[i].type, &qm->caps);
	}
}

static int qm_get_pci_res(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pci_request_mem_regions(pdev, qm->dev_name);
	if (ret < 0) {
		dev_err(dev, "Failed to request mem regions!\n");
		return ret;
	}

	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
	qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
	if (!qm->io_base) {
		ret = -EIO;
		goto err_request_mem_regions;
	}

	qm_get_hw_caps(qm);
	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
		qm->db_interval = QM_QP_DB_INTERVAL;
		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
		qm->db_io_base = ioremap(qm->db_phys_base,
					 pci_resource_len(pdev, PCI_BAR_4));
		if (!qm->db_io_base) {
			ret = -EIO;
			goto err_ioremap;
		}
	} else {
		qm->db_phys_base = qm->phys_base;
		qm->db_io_base = qm->io_base;
		qm->db_interval = 0;
	}

	ret = qm_get_qp_num(qm);
	if (ret)
		goto err_db_ioremap;

	return 0;

err_db_ioremap:
	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
		iounmap(qm->db_io_base);
err_ioremap:
	iounmap(qm->io_base);
err_request_mem_regions:
	pci_release_mem_regions(pdev);
	return ret;
}

static int hisi_qm_pci_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device mem!\n");
		return ret;
	}

	ret = qm_get_pci_res(qm);
	if (ret)
		goto err_disable_pcidev;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_get_pci_res;
	pci_set_master(pdev);

	num_vec = qm_get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_get_pci_res;
	}

	return 0;

err_get_pci_res:
	qm_put_pci_res(qm);
err_disable_pcidev:
	pci_disable_device(pdev);
	return ret;
}

static int hisi_qm_init_work(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		INIT_WORK(&qm->poll_data[i].work, qm_work_process);

	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	if (qm->ver > QM_HW_V2)
		INIT_WORK(&qm->cmd_process, qm_cmd_process);

	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "failed to alloc workqueue!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u16 sq_depth, cq_depth;
	size_t qp_dma_size;
	int i, ret;

	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
	if (!qm->qp_array)
		return -ENOMEM;

	qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
	if (!qm->poll_data) {
		kfree(qm->qp_array);
		return -ENOMEM;
	}

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

	/* one more page for device or qp statuses */
	qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
	for (i = 0; i < qm->qp_num; i++) {
		qm->poll_data[i].qm = qm;
		ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
		if (ret)
			goto err_init_qp_mem;

		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
	}

	return 0;
err_init_qp_mem:
	hisi_qp_memory_uninit(qm, i);

	return ret;
}

static int hisi_qm_memory_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret, total_func;
	size_t off = 0;

	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
		qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
		if (!qm->factor)
			return -ENOMEM;

		/* Only the PF value needs to be initialized */
		qm->factor[0].func_qos = QM_QOS_MAX_VAL;
	}

#define QM_INIT_BUF(qm, type, num) do { \
	(qm)->type = ((qm)->qdma.va + (off)); \
	(qm)->type##_dma = (qm)->qdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

	idr_init(&qm->qp_idr);
	qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
			QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_ATOMIC);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
	if (!qm->qdma.va) {
		ret = -ENOMEM;
		goto err_destroy_idr;
	}

	QM_INIT_BUF(qm, eqe, qm->eq_depth);
	QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_alloc_qp_array;

	return 0;

err_alloc_qp_array:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_destroy_idr:
	idr_destroy(&qm->qp_idr);
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		kfree(qm->factor);

	return ret;
}

/**
 * hisi_qm_init() - Initialize the qm's configuration.
 * @qm: The qm needing init.
 *
 * This function initializes the qm; hisi_qm_start() can then be called to
 * put the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irqs_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_PF) {
		qm_disable_clock_gate(qm);
		ret = qm_dev_mem_reset(qm);
		if (ret) {
			dev_err(dev, "failed to reset device memory\n");
			goto err_irq_register;
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
		ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	ret = hisi_qm_init_work(qm);
	if (ret)
		goto err_free_qm_memory;

	qm_cmd_init(qm);
	atomic_set(&qm->status.flags, QM_INIT);

	return 0;

err_free_qm_memory:
	hisi_qm_memory_uninit(qm);
err_alloc_uacce:
	if (qm->use_sva) {
		uacce_remove(qm->uacce);
		qm->uacce = NULL;
	}
err_irq_register:
	qm_irqs_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);

/**
 * hisi_qm_get_dfx_access() - Try to get dfx access.
 * @qm: pointer to accelerator device.
 *
 * Try to get dfx access, then the user can read the debug messages.
 *
 * If the device is suspended, return failure; otherwise bump up the
 * runtime PM usage counter.
 */
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (pm_runtime_suspended(dev)) {
		dev_info(dev, "can not read/write - device in suspended.\n");
		return -EAGAIN;
	}

	return qm_pm_get_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);

/**
 * hisi_qm_put_dfx_access() - Put dfx access.
 * @qm: pointer to accelerator device.
 *
 * Put dfx access, drop the runtime PM usage counter.
 */
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
{
	qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
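
/*
 * Illustrative sketch of the expected get/put pairing around a debugfs
 * access (the read helper name is hypothetical):
 *
 *	ret = hisi_qm_get_dfx_access(qm);
 *	if (ret)
 *		return ret;
 *
 *	ret = example_debugfs_read(qm, buf, count);
 *
 *	hisi_qm_put_dfx_access(qm);
 *	return ret;
 */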

/**
 * hisi_qm_pm_init() - Initialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Function that initializes qm runtime PM.
 */
void hisi_qm_pm_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);

/**
 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Function that uninitializes qm runtime PM.
 */
void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_get_noresume(dev);
	pm_runtime_dont_use_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);

static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "failed to disable MSI before suspending!\n");
		return ret;
	}

	/* shutdown OOO register */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");

	return ret;
}

static int qm_rebuild_for_resume(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
		return ret;
	}

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSI after resuming!\n");
		return ret;
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "failed to init device after resuming\n");
		return ret;
	}

	qm_cmd_init(qm);
	hisi_qm_dev_err_init(qm);
	qm_disable_clock_gate(qm);
	ret = qm_dev_mem_reset(qm);
	if (ret)
		pci_err(pdev, "failed to reset device memory\n");

	return ret;
}

/**
 * hisi_qm_suspend() - Runtime suspend of given device.
 * @dev: device to suspend.
 *
 * Function that suspends the device.
 */
int hisi_qm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "entering suspended state\n");

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret) {
		pci_err(pdev, "failed to stop qm(%d)\n", ret);
		return ret;
	}

	ret = qm_prepare_for_suspend(qm);
	if (ret)
		pci_err(pdev, "failed to prepare for suspend(%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_suspend);
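
/*
 * Illustrative sketch (driver name hypothetical): an accelerator driver that
 * supports runtime PM would typically plug hisi_qm_suspend() and
 * hisi_qm_resume() (defined below) into its dev_pm_ops:
 *
 *	static const struct dev_pm_ops example_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 */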

/**
 * hisi_qm_resume() - Runtime resume of given device.
 * @dev: device to resume.
 *
 * Function that resumes the device.
 */
int hisi_qm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "resuming from suspend state\n");

	ret = qm_rebuild_for_resume(qm);
	if (ret) {
		pci_err(pdev, "failed to rebuild resume(%d)\n", ret);
		return ret;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		if (qm_check_dev_error(qm)) {
			pci_info(pdev, "failed to start qm due to device error, device will be reset!\n");
			return 0;
		}

		pci_err(pdev, "failed to start qm(%d)!\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");