1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Copyright (c) 2018-2019 Hisilicon Limited. */ 3 4 #include <linux/device.h> 5 6 #include "hclge_debugfs.h" 7 #include "hclge_main.h" 8 #include "hclge_tm.h" 9 #include "hnae3.h" 10 11 static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { 12 { .reg_type = "bios common", 13 .dfx_msg = &hclge_dbg_bios_common_reg[0], 14 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg), 15 .offset = HCLGE_DBG_DFX_BIOS_OFFSET, 16 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } }, 17 { .reg_type = "ssu", 18 .dfx_msg = &hclge_dbg_ssu_reg_0[0], 19 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0), 20 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET, 21 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } }, 22 { .reg_type = "ssu", 23 .dfx_msg = &hclge_dbg_ssu_reg_1[0], 24 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1), 25 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET, 26 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } }, 27 { .reg_type = "ssu", 28 .dfx_msg = &hclge_dbg_ssu_reg_2[0], 29 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2), 30 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET, 31 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } }, 32 { .reg_type = "igu egu", 33 .dfx_msg = &hclge_dbg_igu_egu_reg[0], 34 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg), 35 .offset = HCLGE_DBG_DFX_IGU_OFFSET, 36 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } }, 37 { .reg_type = "rpu", 38 .dfx_msg = &hclge_dbg_rpu_reg_0[0], 39 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0), 40 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET, 41 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } }, 42 { .reg_type = "rpu", 43 .dfx_msg = &hclge_dbg_rpu_reg_1[0], 44 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1), 45 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET, 46 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } }, 47 { .reg_type = "ncsi", 48 .dfx_msg = &hclge_dbg_ncsi_reg[0], 49 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg), 50 .offset = HCLGE_DBG_DFX_NCSI_OFFSET, 51 .cmd = HCLGE_OPC_DFX_NCSI_REG } }, 52 { .reg_type = "rtc", 53 .dfx_msg = &hclge_dbg_rtc_reg[0], 54 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg), 55 .offset = HCLGE_DBG_DFX_RTC_OFFSET, 56 .cmd = HCLGE_OPC_DFX_RTC_REG } }, 57 { .reg_type = "ppp", 58 .dfx_msg = &hclge_dbg_ppp_reg[0], 59 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg), 60 .offset = HCLGE_DBG_DFX_PPP_OFFSET, 61 .cmd = HCLGE_OPC_DFX_PPP_REG } }, 62 { .reg_type = "rcb", 63 .dfx_msg = &hclge_dbg_rcb_reg[0], 64 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg), 65 .offset = HCLGE_DBG_DFX_RCB_OFFSET, 66 .cmd = HCLGE_OPC_DFX_RCB_REG } }, 67 { .reg_type = "tqp", 68 .dfx_msg = &hclge_dbg_tqp_reg[0], 69 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg), 70 .offset = HCLGE_DBG_DFX_TQP_OFFSET, 71 .cmd = HCLGE_OPC_DFX_TQP_REG } }, 72 }; 73 74 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset) 75 { 76 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; 77 int entries_per_desc; 78 int index; 79 int ret; 80 81 ret = hclge_query_bd_num_cmd_send(hdev, desc); 82 if (ret) { 83 dev_err(&hdev->pdev->dev, 84 "get dfx bdnum fail, ret = %d\n", ret); 85 return ret; 86 } 87 88 entries_per_desc = ARRAY_SIZE(desc[0].data); 89 index = offset % entries_per_desc; 90 return (int)desc[offset / entries_per_desc].data[index]; 91 } 92 93 static int hclge_dbg_cmd_send(struct hclge_dev *hdev, 94 struct hclge_desc *desc_src, 95 int index, int bd_num, 96 enum hclge_opcode_type cmd) 97 { 98 struct hclge_desc *desc = desc_src; 99 int ret, i; 100 101 hclge_cmd_setup_basic_desc(desc, cmd, true); 102 desc->data[0] = cpu_to_le32(index); 103 104 for (i = 1; i < bd_num; i++) { 105 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 106 desc++; 107 hclge_cmd_setup_basic_desc(desc, cmd, true); 108 } 109 110 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); 111 if (ret) 112 dev_err(&hdev->pdev->dev, 113 "cmd(0x%x) send fail, ret = %d\n", cmd, ret); 114 return ret; 115 } 116 117 static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, 118 struct hclge_dbg_reg_type_info *reg_info, 119 const char *cmd_buf) 120 { 121 #define IDX_OFFSET 1 122 123 const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET]; 124 struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; 125 struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; 126 struct hclge_desc *desc_src; 127 struct hclge_desc *desc; 128 int entries_per_desc; 129 int bd_num, buf_len; 130 int index = 0; 131 int min_num; 132 int ret, i; 133 134 if (*s) { 135 ret = kstrtouint(s, 0, &index); 136 index = (ret != 0) ? 0 : index; 137 } 138 139 bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset); 140 if (bd_num <= 0) { 141 dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n", 142 reg_msg->offset, bd_num); 143 return; 144 } 145 146 buf_len = sizeof(struct hclge_desc) * bd_num; 147 desc_src = kzalloc(buf_len, GFP_KERNEL); 148 if (!desc_src) { 149 dev_err(&hdev->pdev->dev, "call kzalloc failed\n"); 150 return; 151 } 152 153 desc = desc_src; 154 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd); 155 if (ret) { 156 kfree(desc_src); 157 return; 158 } 159 160 entries_per_desc = ARRAY_SIZE(desc->data); 161 min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num); 162 163 desc = desc_src; 164 for (i = 0; i < min_num; i++) { 165 if (i > 0 && (i % entries_per_desc) == 0) 166 desc++; 167 if (dfx_message->flag) 168 dev_info(&hdev->pdev->dev, "%s: 0x%x\n", 169 dfx_message->message, 170 le32_to_cpu(desc->data[i % entries_per_desc])); 171 172 dfx_message++; 173 } 174 175 kfree(desc_src); 176 } 177 178 static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf) 179 { 180 struct device *dev = &hdev->pdev->dev; 181 struct hclge_dbg_bitmap_cmd *bitmap; 182 int rq_id, pri_id, qset_id; 183 int port_id, nq_id, pg_id; 184 struct hclge_desc desc[2]; 185 186 int cnt, ret; 187 188 cnt = sscanf(cmd_buf, "%i %i %i %i %i %i", 189 &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id); 190 if (cnt != 6) { 191 dev_err(&hdev->pdev->dev, 192 "dump dcb: bad command parameter, cnt=%d\n", cnt); 193 return; 194 } 195 196 ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, 197 HCLGE_OPC_QSET_DFX_STS); 198 if (ret) 199 return; 200 201 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 202 dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0); 203 dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1); 204 dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2); 205 dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3); 206 207 ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS); 208 if (ret) 209 return; 210 211 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 212 dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0); 213 dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1); 214 dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2); 215 216 ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS); 217 if (ret) 218 return; 219 220 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 221 dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0); 222 dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1); 223 dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2); 224 225 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 226 HCLGE_OPC_PORT_DFX_STS); 227 if (ret) 228 return; 229 230 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 231 dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0); 232 dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1); 233 234 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT); 235 if (ret) 236 return; 237 238 dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); 239 240 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT); 241 if (ret) 242 return; 243 244 dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); 245 246 ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS); 247 if (ret) 248 return; 249 250 dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1])); 251 dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2])); 252 dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", 253 le32_to_cpu(desc[0].data[3])); 254 dev_info(dev, "tx_private_waterline: 0x%x\n", 255 le32_to_cpu(desc[0].data[4])); 256 dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5])); 257 dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0])); 258 dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1])); 259 260 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 261 HCLGE_OPC_TM_INTERNAL_CNT); 262 if (ret) 263 return; 264 265 dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1])); 266 dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2])); 267 268 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 269 HCLGE_OPC_TM_INTERNAL_STS_1); 270 if (ret) 271 return; 272 273 dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1])); 274 dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2])); 275 dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3])); 276 dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", 277 le32_to_cpu(desc[0].data[4])); 278 dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", 279 le32_to_cpu(desc[0].data[5])); 280 } 281 282 static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf) 283 { 284 struct hclge_dbg_reg_type_info *reg_info; 285 bool has_dump = false; 286 int i; 287 288 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { 289 reg_info = &hclge_dbg_reg_info[i]; 290 if (!strncmp(cmd_buf, reg_info->reg_type, 291 strlen(reg_info->reg_type))) { 292 hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf); 293 has_dump = true; 294 } 295 } 296 297 if (strncmp(cmd_buf, "dcb", 3) == 0) { 298 hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]); 299 has_dump = true; 300 } 301 302 if (!has_dump) { 303 dev_info(&hdev->pdev->dev, "unknown command\n"); 304 return; 305 } 306 } 307 308 static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index, 309 char *title_buf, char *true_buf, 310 char *false_buf) 311 { 312 if (flag) 313 dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index, 314 true_buf); 315 else 316 dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index, 317 false_buf); 318 } 319 320 static void hclge_dbg_dump_tc(struct hclge_dev *hdev) 321 { 322 struct hclge_ets_tc_weight_cmd *ets_weight; 323 struct hclge_desc desc; 324 int i, ret; 325 326 if (!hnae3_dev_dcb_supported(hdev)) { 327 dev_info(&hdev->pdev->dev, 328 "Only DCB-supported dev supports tc\n"); 329 return; 330 } 331 332 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); 333 334 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 335 if (ret) { 336 dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret); 337 return; 338 } 339 340 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; 341 342 dev_info(&hdev->pdev->dev, "dump tc\n"); 343 dev_info(&hdev->pdev->dev, "weight_offset: %u\n", 344 ets_weight->weight_offset); 345 346 for (i = 0; i < HNAE3_MAX_TC; i++) 347 hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i, 348 "tc", "no sp mode", "sp mode"); 349 } 350 351 static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) 352 { 353 struct hclge_port_shapping_cmd *port_shap_cfg_cmd; 354 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 355 struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd; 356 enum hclge_opcode_type cmd; 357 struct hclge_desc desc; 358 int ret; 359 360 cmd = HCLGE_OPC_TM_PG_C_SHAPPING; 361 hclge_cmd_setup_basic_desc(&desc, cmd, true); 362 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 363 if (ret) 364 goto err_tm_pg_cmd_send; 365 366 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 367 dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 368 dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n", 369 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); 370 371 cmd = HCLGE_OPC_TM_PG_P_SHAPPING; 372 hclge_cmd_setup_basic_desc(&desc, cmd, true); 373 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 374 if (ret) 375 goto err_tm_pg_cmd_send; 376 377 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 378 dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 379 dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", 380 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); 381 382 cmd = HCLGE_OPC_TM_PORT_SHAPPING; 383 hclge_cmd_setup_basic_desc(&desc, cmd, true); 384 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 385 if (ret) 386 goto err_tm_pg_cmd_send; 387 388 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; 389 dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", 390 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para)); 391 392 cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; 393 hclge_cmd_setup_basic_desc(&desc, cmd, true); 394 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 395 if (ret) 396 goto err_tm_pg_cmd_send; 397 398 dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", 399 le32_to_cpu(desc.data[0])); 400 401 cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG; 402 hclge_cmd_setup_basic_desc(&desc, cmd, true); 403 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 404 if (ret) 405 goto err_tm_pg_cmd_send; 406 407 dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", 408 le32_to_cpu(desc.data[0])); 409 410 cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; 411 hclge_cmd_setup_basic_desc(&desc, cmd, true); 412 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 413 if (ret) 414 goto err_tm_pg_cmd_send; 415 416 dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", 417 le32_to_cpu(desc.data[0])); 418 419 if (!hnae3_dev_dcb_supported(hdev)) { 420 dev_info(&hdev->pdev->dev, 421 "Only DCB-supported dev supports tm mapping\n"); 422 return; 423 } 424 425 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 426 hclge_cmd_setup_basic_desc(&desc, cmd, true); 427 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 428 if (ret) 429 goto err_tm_pg_cmd_send; 430 431 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 432 dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n", 433 bp_to_qs_map_cmd->tc_id); 434 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n", 435 bp_to_qs_map_cmd->qs_group_id); 436 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", 437 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map)); 438 return; 439 440 err_tm_pg_cmd_send: 441 dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n", 442 cmd, ret); 443 } 444 445 static void hclge_dbg_dump_tm(struct hclge_dev *hdev) 446 { 447 struct hclge_priority_weight_cmd *priority_weight; 448 struct hclge_pg_to_pri_link_cmd *pg_to_pri_map; 449 struct hclge_qs_to_pri_link_cmd *qs_to_pri_map; 450 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 451 struct hclge_pri_shapping_cmd *shap_cfg_cmd; 452 struct hclge_pg_weight_cmd *pg_weight; 453 struct hclge_qs_weight_cmd *qs_weight; 454 enum hclge_opcode_type cmd; 455 struct hclge_desc desc; 456 int ret; 457 458 cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK; 459 hclge_cmd_setup_basic_desc(&desc, cmd, true); 460 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 461 if (ret) 462 goto err_tm_cmd_send; 463 464 pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data; 465 dev_info(&hdev->pdev->dev, "dump tm\n"); 466 dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n", 467 pg_to_pri_map->pg_id); 468 dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n", 469 pg_to_pri_map->pri_bit_map); 470 471 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 472 hclge_cmd_setup_basic_desc(&desc, cmd, true); 473 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 474 if (ret) 475 goto err_tm_cmd_send; 476 477 qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 478 dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n", 479 le16_to_cpu(qs_to_pri_map->qs_id)); 480 dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n", 481 qs_to_pri_map->priority); 482 dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n", 483 qs_to_pri_map->link_vld); 484 485 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 486 hclge_cmd_setup_basic_desc(&desc, cmd, true); 487 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 488 if (ret) 489 goto err_tm_cmd_send; 490 491 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 492 dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", 493 le16_to_cpu(nq_to_qs_map->nq_id)); 494 dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n", 495 le16_to_cpu(nq_to_qs_map->qset_id)); 496 497 cmd = HCLGE_OPC_TM_PG_WEIGHT; 498 hclge_cmd_setup_basic_desc(&desc, cmd, true); 499 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 500 if (ret) 501 goto err_tm_cmd_send; 502 503 pg_weight = (struct hclge_pg_weight_cmd *)desc.data; 504 dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id); 505 dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr); 506 507 cmd = HCLGE_OPC_TM_QS_WEIGHT; 508 hclge_cmd_setup_basic_desc(&desc, cmd, true); 509 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 510 if (ret) 511 goto err_tm_cmd_send; 512 513 qs_weight = (struct hclge_qs_weight_cmd *)desc.data; 514 dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", 515 le16_to_cpu(qs_weight->qs_id)); 516 dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr); 517 518 cmd = HCLGE_OPC_TM_PRI_WEIGHT; 519 hclge_cmd_setup_basic_desc(&desc, cmd, true); 520 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 521 if (ret) 522 goto err_tm_cmd_send; 523 524 priority_weight = (struct hclge_priority_weight_cmd *)desc.data; 525 dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id); 526 dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr); 527 528 cmd = HCLGE_OPC_TM_PRI_C_SHAPPING; 529 hclge_cmd_setup_basic_desc(&desc, cmd, true); 530 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 531 if (ret) 532 goto err_tm_cmd_send; 533 534 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 535 dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); 536 dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", 537 le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); 538 539 cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; 540 hclge_cmd_setup_basic_desc(&desc, cmd, true); 541 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 542 if (ret) 543 goto err_tm_cmd_send; 544 545 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 546 dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); 547 dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", 548 le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); 549 550 hclge_dbg_dump_tm_pg(hdev); 551 552 return; 553 554 err_tm_cmd_send: 555 dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n", 556 cmd, ret); 557 } 558 559 static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, 560 const char *cmd_buf) 561 { 562 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 563 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 564 struct hclge_qs_to_pri_link_cmd *map; 565 struct hclge_tqp_tx_queue_tc_cmd *tc; 566 enum hclge_opcode_type cmd; 567 struct hclge_desc desc; 568 int queue_id, group_id; 569 u32 qset_maping[32]; 570 int tc_id, qset_id; 571 int pri_id, ret; 572 u32 i; 573 574 ret = kstrtouint(cmd_buf, 0, &queue_id); 575 queue_id = (ret != 0) ? 0 : queue_id; 576 577 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 578 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 579 hclge_cmd_setup_basic_desc(&desc, cmd, true); 580 nq_to_qs_map->nq_id = cpu_to_le16(queue_id); 581 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 582 if (ret) 583 goto err_tm_map_cmd_send; 584 qset_id = nq_to_qs_map->qset_id & 0x3FF; 585 586 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 587 map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 588 hclge_cmd_setup_basic_desc(&desc, cmd, true); 589 map->qs_id = cpu_to_le16(qset_id); 590 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 591 if (ret) 592 goto err_tm_map_cmd_send; 593 pri_id = map->priority; 594 595 cmd = HCLGE_OPC_TQP_TX_QUEUE_TC; 596 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; 597 hclge_cmd_setup_basic_desc(&desc, cmd, true); 598 tc->queue_id = cpu_to_le16(queue_id); 599 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 600 if (ret) 601 goto err_tm_map_cmd_send; 602 tc_id = tc->tc_id & 0x7; 603 604 dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n"); 605 dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n", 606 queue_id, qset_id, pri_id, tc_id); 607 608 if (!hnae3_dev_dcb_supported(hdev)) { 609 dev_info(&hdev->pdev->dev, 610 "Only DCB-supported dev supports tm mapping\n"); 611 return; 612 } 613 614 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 615 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 616 for (group_id = 0; group_id < 32; group_id++) { 617 hclge_cmd_setup_basic_desc(&desc, cmd, true); 618 bp_to_qs_map_cmd->tc_id = tc_id; 619 bp_to_qs_map_cmd->qs_group_id = group_id; 620 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 621 if (ret) 622 goto err_tm_map_cmd_send; 623 624 qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map; 625 } 626 627 dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n"); 628 629 i = 0; 630 for (group_id = 0; group_id < 4; group_id++) { 631 dev_info(&hdev->pdev->dev, 632 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", 633 group_id * 256, qset_maping[(u32)(i + 7)], 634 qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)], 635 qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)], 636 qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)], 637 qset_maping[i]); 638 i += 8; 639 } 640 641 return; 642 643 err_tm_map_cmd_send: 644 dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n", 645 cmd, ret); 646 } 647 648 static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) 649 { 650 struct hclge_cfg_pause_param_cmd *pause_param; 651 struct hclge_desc desc; 652 int ret; 653 654 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); 655 656 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 657 if (ret) { 658 dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n", 659 ret); 660 return; 661 } 662 663 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; 664 dev_info(&hdev->pdev->dev, "dump qos pause cfg\n"); 665 dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n", 666 pause_param->pause_trans_gap); 667 dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n", 668 le16_to_cpu(pause_param->pause_trans_time)); 669 } 670 671 static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) 672 { 673 struct hclge_qos_pri_map_cmd *pri_map; 674 struct hclge_desc desc; 675 int ret; 676 677 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); 678 679 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 680 if (ret) { 681 dev_err(&hdev->pdev->dev, 682 "dump qos pri map fail, ret = %d\n", ret); 683 return; 684 } 685 686 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; 687 dev_info(&hdev->pdev->dev, "dump qos pri map\n"); 688 dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri); 689 dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc); 690 dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc); 691 dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc); 692 dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc); 693 dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc); 694 dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc); 695 dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc); 696 dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc); 697 } 698 699 static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) 700 { 701 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; 702 struct hclge_rx_priv_buff_cmd *rx_buf_cmd; 703 struct hclge_rx_priv_wl_buf *rx_priv_wl; 704 struct hclge_rx_com_wl *rx_packet_cnt; 705 struct hclge_rx_com_thrd *rx_com_thrd; 706 struct hclge_rx_com_wl *rx_com_wl; 707 enum hclge_opcode_type cmd; 708 struct hclge_desc desc[2]; 709 int i, ret; 710 711 cmd = HCLGE_OPC_TX_BUFF_ALLOC; 712 hclge_cmd_setup_basic_desc(desc, cmd, true); 713 ret = hclge_cmd_send(&hdev->hw, desc, 1); 714 if (ret) 715 goto err_qos_cmd_send; 716 717 dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); 718 719 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; 720 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 721 dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, 722 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); 723 724 cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; 725 hclge_cmd_setup_basic_desc(desc, cmd, true); 726 ret = hclge_cmd_send(&hdev->hw, desc, 1); 727 if (ret) 728 goto err_qos_cmd_send; 729 730 dev_info(&hdev->pdev->dev, "\n"); 731 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; 732 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 733 dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, 734 le16_to_cpu(rx_buf_cmd->buf_num[i])); 735 736 dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", 737 le16_to_cpu(rx_buf_cmd->shared_buf)); 738 739 cmd = HCLGE_OPC_RX_COM_WL_ALLOC; 740 hclge_cmd_setup_basic_desc(desc, cmd, true); 741 ret = hclge_cmd_send(&hdev->hw, desc, 1); 742 if (ret) 743 goto err_qos_cmd_send; 744 745 rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; 746 dev_info(&hdev->pdev->dev, "\n"); 747 dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", 748 le16_to_cpu(rx_com_wl->com_wl.high), 749 le16_to_cpu(rx_com_wl->com_wl.low)); 750 751 cmd = HCLGE_OPC_RX_GBL_PKT_CNT; 752 hclge_cmd_setup_basic_desc(desc, cmd, true); 753 ret = hclge_cmd_send(&hdev->hw, desc, 1); 754 if (ret) 755 goto err_qos_cmd_send; 756 757 rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; 758 dev_info(&hdev->pdev->dev, 759 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", 760 le16_to_cpu(rx_packet_cnt->com_wl.high), 761 le16_to_cpu(rx_packet_cnt->com_wl.low)); 762 dev_info(&hdev->pdev->dev, "\n"); 763 764 if (!hnae3_dev_dcb_supported(hdev)) { 765 dev_info(&hdev->pdev->dev, 766 "Only DCB-supported dev supports rx priv wl\n"); 767 return; 768 } 769 cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; 770 hclge_cmd_setup_basic_desc(&desc[0], cmd, true); 771 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 772 hclge_cmd_setup_basic_desc(&desc[1], cmd, true); 773 ret = hclge_cmd_send(&hdev->hw, desc, 2); 774 if (ret) 775 goto err_qos_cmd_send; 776 777 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; 778 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 779 dev_info(&hdev->pdev->dev, 780 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, 781 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 782 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 783 784 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; 785 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 786 dev_info(&hdev->pdev->dev, 787 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", 788 i + HCLGE_TC_NUM_ONE_DESC, 789 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 790 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 791 792 cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; 793 hclge_cmd_setup_basic_desc(&desc[0], cmd, true); 794 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 795 hclge_cmd_setup_basic_desc(&desc[1], cmd, true); 796 ret = hclge_cmd_send(&hdev->hw, desc, 2); 797 if (ret) 798 goto err_qos_cmd_send; 799 800 dev_info(&hdev->pdev->dev, "\n"); 801 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; 802 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 803 dev_info(&hdev->pdev->dev, 804 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, 805 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 806 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 807 808 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; 809 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 810 dev_info(&hdev->pdev->dev, 811 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", 812 i + HCLGE_TC_NUM_ONE_DESC, 813 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 814 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 815 return; 816 817 err_qos_cmd_send: 818 dev_err(&hdev->pdev->dev, 819 "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret); 820 } 821 822 static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) 823 { 824 struct hclge_mac_ethertype_idx_rd_cmd *req0; 825 char printf_buf[HCLGE_DBG_BUF_LEN]; 826 struct hclge_desc desc; 827 int ret, i; 828 829 dev_info(&hdev->pdev->dev, "mng tab:\n"); 830 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 831 strncat(printf_buf, 832 "entry|mac_addr |mask|ether|mask|vlan|mask", 833 HCLGE_DBG_BUF_LEN - 1); 834 strncat(printf_buf + strlen(printf_buf), 835 "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n", 836 HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1); 837 838 dev_info(&hdev->pdev->dev, "%s", printf_buf); 839 840 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { 841 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, 842 true); 843 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; 844 req0->index = cpu_to_le16(i); 845 846 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 847 if (ret) { 848 dev_err(&hdev->pdev->dev, 849 "call hclge_cmd_send fail, ret = %d\n", ret); 850 return; 851 } 852 853 if (!req0->resp_code) 854 continue; 855 856 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 857 snprintf(printf_buf, HCLGE_DBG_BUF_LEN, 858 "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", 859 le16_to_cpu(req0->index), 860 req0->mac_addr[0], req0->mac_addr[1], 861 req0->mac_addr[2], req0->mac_addr[3], 862 req0->mac_addr[4], req0->mac_addr[5]); 863 864 snprintf(printf_buf + strlen(printf_buf), 865 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 866 "%x |%04x |%x |%04x|%x |%02x |%02x |", 867 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), 868 req0->ethter_type, 869 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), 870 req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG, 871 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), 872 req0->i_port_bitmap, req0->i_port_direction); 873 874 snprintf(printf_buf + strlen(printf_buf), 875 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 876 "%d |%d |%02d |%04d|%x\n", 877 !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B), 878 req0->egress_port & HCLGE_DBG_MNG_PF_ID, 879 (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, 880 req0->egress_queue, 881 !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B)); 882 883 dev_info(&hdev->pdev->dev, "%s", printf_buf); 884 } 885 } 886 887 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, 888 bool sel_x, u32 loc) 889 { 890 struct hclge_fd_tcam_config_1_cmd *req1; 891 struct hclge_fd_tcam_config_2_cmd *req2; 892 struct hclge_fd_tcam_config_3_cmd *req3; 893 struct hclge_desc desc[3]; 894 int ret, i; 895 u32 *req; 896 897 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); 898 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 899 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); 900 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 901 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); 902 903 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 904 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 905 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 906 907 req1->stage = stage; 908 req1->xy_sel = sel_x ? 1 : 0; 909 req1->index = cpu_to_le32(loc); 910 911 ret = hclge_cmd_send(&hdev->hw, desc, 3); 912 if (ret) 913 return ret; 914 915 dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", 916 sel_x ? "x" : "y", loc); 917 918 /* tcam_data0 ~ tcam_data1 */ 919 req = (u32 *)req1->tcam_data; 920 for (i = 0; i < 2; i++) 921 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 922 923 /* tcam_data2 ~ tcam_data7 */ 924 req = (u32 *)req2->tcam_data; 925 for (i = 0; i < 6; i++) 926 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 927 928 /* tcam_data8 ~ tcam_data12 */ 929 req = (u32 *)req3->tcam_data; 930 for (i = 0; i < 5; i++) 931 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 932 933 return ret; 934 } 935 936 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) 937 { 938 struct hclge_fd_rule *rule; 939 struct hlist_node *node; 940 int cnt = 0; 941 942 spin_lock_bh(&hdev->fd_rule_lock); 943 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 944 rule_locs[cnt] = rule->location; 945 cnt++; 946 } 947 spin_unlock_bh(&hdev->fd_rule_lock); 948 949 if (cnt != hdev->hclge_fd_rule_num) 950 return -EINVAL; 951 952 return cnt; 953 } 954 955 static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) 956 { 957 int i, ret, rule_cnt; 958 u16 *rule_locs; 959 960 if (!hnae3_dev_fd_supported(hdev)) { 961 dev_err(&hdev->pdev->dev, 962 "Only FD-supported dev supports dump fd tcam\n"); 963 return; 964 } 965 966 if (!hdev->hclge_fd_rule_num || 967 !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 968 return; 969 970 rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 971 sizeof(u16), GFP_KERNEL); 972 if (!rule_locs) 973 return; 974 975 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs); 976 if (rule_cnt <= 0) { 977 dev_err(&hdev->pdev->dev, 978 "failed to get rule number, ret = %d\n", rule_cnt); 979 kfree(rule_locs); 980 return; 981 } 982 983 for (i = 0; i < rule_cnt; i++) { 984 ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]); 985 if (ret) { 986 dev_err(&hdev->pdev->dev, 987 "failed to get fd tcam key x, ret = %d\n", ret); 988 kfree(rule_locs); 989 return; 990 } 991 992 ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]); 993 if (ret) { 994 dev_err(&hdev->pdev->dev, 995 "failed to get fd tcam key y, ret = %d\n", ret); 996 kfree(rule_locs); 997 return; 998 } 999 } 1000 1001 kfree(rule_locs); 1002 } 1003 1004 void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) 1005 { 1006 dev_info(&hdev->pdev->dev, "PF reset count: %u\n", 1007 hdev->rst_stats.pf_rst_cnt); 1008 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1009 hdev->rst_stats.flr_rst_cnt); 1010 dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n", 1011 hdev->rst_stats.global_rst_cnt); 1012 dev_info(&hdev->pdev->dev, "IMP reset count: %u\n", 1013 hdev->rst_stats.imp_rst_cnt); 1014 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1015 hdev->rst_stats.reset_done_cnt); 1016 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1017 hdev->rst_stats.hw_reset_done_cnt); 1018 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1019 hdev->rst_stats.reset_cnt); 1020 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1021 hdev->rst_stats.reset_fail_cnt); 1022 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1023 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); 1024 dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n", 1025 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); 1026 dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n", 1027 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); 1028 dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n", 1029 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 1030 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1031 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); 1032 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1033 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); 1034 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1035 } 1036 1037 static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev) 1038 { 1039 dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n", 1040 hdev->last_serv_processed); 1041 dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n", 1042 hdev->serv_processed_cnt); 1043 } 1044 1045 static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) 1046 { 1047 struct hclge_desc *desc_src, *desc_tmp; 1048 struct hclge_get_m7_bd_cmd *req; 1049 struct hclge_desc desc; 1050 u32 bd_num, buf_len; 1051 int ret, i; 1052 1053 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true); 1054 1055 req = (struct hclge_get_m7_bd_cmd *)desc.data; 1056 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1057 if (ret) { 1058 dev_err(&hdev->pdev->dev, 1059 "get firmware statistics bd number failed, ret = %d\n", 1060 ret); 1061 return; 1062 } 1063 1064 bd_num = le32_to_cpu(req->bd_num); 1065 1066 buf_len = sizeof(struct hclge_desc) * bd_num; 1067 desc_src = kzalloc(buf_len, GFP_KERNEL); 1068 if (!desc_src) { 1069 dev_err(&hdev->pdev->dev, 1070 "allocate desc for get_m7_stats failed\n"); 1071 return; 1072 } 1073 1074 desc_tmp = desc_src; 1075 ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num, 1076 HCLGE_OPC_M7_STATS_INFO); 1077 if (ret) { 1078 kfree(desc_src); 1079 dev_err(&hdev->pdev->dev, 1080 "get firmware statistics failed, ret = %d\n", ret); 1081 return; 1082 } 1083 1084 for (i = 0; i < bd_num; i++) { 1085 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", 1086 le32_to_cpu(desc_tmp->data[0]), 1087 le32_to_cpu(desc_tmp->data[1]), 1088 le32_to_cpu(desc_tmp->data[2])); 1089 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", 1090 le32_to_cpu(desc_tmp->data[3]), 1091 le32_to_cpu(desc_tmp->data[4]), 1092 le32_to_cpu(desc_tmp->data[5])); 1093 1094 desc_tmp++; 1095 } 1096 1097 kfree(desc_src); 1098 } 1099 1100 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 1101 1102 static void hclge_ncl_config_data_print(struct hclge_dev *hdev, 1103 struct hclge_desc *desc, int *offset, 1104 int *length) 1105 { 1106 #define HCLGE_CMD_DATA_NUM 6 1107 1108 int i; 1109 int j; 1110 1111 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { 1112 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { 1113 if (i == 0 && j == 0) 1114 continue; 1115 1116 dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", 1117 *offset, 1118 le32_to_cpu(desc[i].data[j])); 1119 *offset += sizeof(u32); 1120 *length -= sizeof(u32); 1121 if (*length <= 0) 1122 return; 1123 } 1124 } 1125 } 1126 1127 /* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file 1128 * @hdev: pointer to struct hclge_dev 1129 * @cmd_buf: string that contains offset and length 1130 */ 1131 static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, 1132 const char *cmd_buf) 1133 { 1134 #define HCLGE_MAX_NCL_CONFIG_OFFSET 4096 1135 #define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4) 1136 1137 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; 1138 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; 1139 int offset; 1140 int length; 1141 int data0; 1142 int ret; 1143 1144 ret = sscanf(cmd_buf, "%x %x", &offset, &length); 1145 if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET || 1146 length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) { 1147 dev_err(&hdev->pdev->dev, "Invalid offset or length.\n"); 1148 return; 1149 } 1150 if (offset < 0 || length <= 0) { 1151 dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n"); 1152 return; 1153 } 1154 1155 dev_info(&hdev->pdev->dev, "offset | data\n"); 1156 1157 while (length > 0) { 1158 data0 = offset; 1159 if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH) 1160 data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16; 1161 else 1162 data0 |= length << 16; 1163 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, 1164 HCLGE_OPC_QUERY_NCL_CONFIG); 1165 if (ret) 1166 return; 1167 1168 hclge_ncl_config_data_print(hdev, desc, &offset, &length); 1169 } 1170 } 1171 1172 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt 1173 * @hdev: pointer to struct hclge_dev 1174 */ 1175 static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) 1176 { 1177 #define HCLGE_BILLION_NANO_SECONDS 1000000000 1178 1179 struct hclge_mac_tnl_stats stats; 1180 unsigned long rem_nsec; 1181 1182 dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n"); 1183 1184 while (kfifo_get(&hdev->mac_tnl_log, &stats)) { 1185 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); 1186 dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n", 1187 (unsigned long)stats.time, rem_nsec / 1000, 1188 stats.status); 1189 } 1190 } 1191 1192 static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid) 1193 { 1194 struct hclge_qs_shapping_cmd *shap_cfg_cmd; 1195 u8 ir_u, ir_b, ir_s, bs_b, bs_s; 1196 struct hclge_desc desc; 1197 u32 shapping_para; 1198 int ret; 1199 1200 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); 1201 1202 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; 1203 shap_cfg_cmd->qs_id = cpu_to_le16(qsid); 1204 1205 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1206 if (ret) { 1207 dev_err(&hdev->pdev->dev, 1208 "qs%u failed to get tx_rate, ret=%d\n", 1209 qsid, ret); 1210 return; 1211 } 1212 1213 shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); 1214 ir_b = hclge_tm_get_field(shapping_para, IR_B); 1215 ir_u = hclge_tm_get_field(shapping_para, IR_U); 1216 ir_s = hclge_tm_get_field(shapping_para, IR_S); 1217 bs_b = hclge_tm_get_field(shapping_para, BS_B); 1218 bs_s = hclge_tm_get_field(shapping_para, BS_S); 1219 1220 dev_info(&hdev->pdev->dev, 1221 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n", 1222 qsid, ir_b, ir_u, ir_s, bs_b, bs_s); 1223 } 1224 1225 static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev) 1226 { 1227 struct hnae3_knic_private_info *kinfo; 1228 struct hclge_vport *vport; 1229 int vport_id, i; 1230 1231 for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) { 1232 vport = &hdev->vport[vport_id]; 1233 kinfo = &vport->nic.kinfo; 1234 1235 dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id); 1236 1237 for (i = 0; i < kinfo->num_tc; i++) { 1238 u16 qsid = vport->qs_offset + i; 1239 1240 hclge_dbg_dump_qs_shaper_single(hdev, qsid); 1241 } 1242 } 1243 } 1244 1245 static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev, 1246 const char *cmd_buf) 1247 { 1248 #define HCLGE_MAX_QSET_NUM 1024 1249 1250 u16 qsid; 1251 int ret; 1252 1253 ret = kstrtou16(cmd_buf, 0, &qsid); 1254 if (ret) { 1255 hclge_dbg_dump_qs_shaper_all(hdev); 1256 return; 1257 } 1258 1259 if (qsid >= HCLGE_MAX_QSET_NUM) { 1260 dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n", 1261 qsid); 1262 return; 1263 } 1264 1265 hclge_dbg_dump_qs_shaper_single(hdev, qsid); 1266 } 1267 1268 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) 1269 { 1270 #define DUMP_REG "dump reg" 1271 #define DUMP_TM_MAP "dump tm map" 1272 1273 struct hclge_vport *vport = hclge_get_vport(handle); 1274 struct hclge_dev *hdev = vport->back; 1275 1276 if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) { 1277 hclge_dbg_fd_tcam(hdev); 1278 } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { 1279 hclge_dbg_dump_tc(hdev); 1280 } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) { 1281 hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]); 1282 } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { 1283 hclge_dbg_dump_tm(hdev); 1284 } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) { 1285 hclge_dbg_dump_qos_pause_cfg(hdev); 1286 } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) { 1287 hclge_dbg_dump_qos_pri_map(hdev); 1288 } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) { 1289 hclge_dbg_dump_qos_buf_cfg(hdev); 1290 } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { 1291 hclge_dbg_dump_mng_table(hdev); 1292 } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) { 1293 hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]); 1294 } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) { 1295 hclge_dbg_dump_rst_info(hdev); 1296 } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) { 1297 hclge_dbg_dump_serv_info(hdev); 1298 } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) { 1299 hclge_dbg_get_m7_stats_info(hdev); 1300 } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) { 1301 hclge_dbg_dump_ncl_config(hdev, 1302 &cmd_buf[sizeof("dump ncl_config")]); 1303 } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) { 1304 hclge_dbg_dump_mac_tnl_status(hdev); 1305 } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) { 1306 hclge_dbg_dump_qs_shaper(hdev, 1307 &cmd_buf[sizeof("dump qs shaper")]); 1308 } else { 1309 dev_info(&hdev->pdev->dev, "unknown command\n"); 1310 return -EINVAL; 1311 } 1312 1313 return 0; 1314 } 1315