1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Copyright (c) 2018-2019 Hisilicon Limited. */ 3 4 #include <linux/device.h> 5 6 #include "hclge_debugfs.h" 7 #include "hclge_main.h" 8 #include "hclge_tm.h" 9 #include "hnae3.h" 10 11 static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { 12 { .reg_type = "bios common", 13 .dfx_msg = &hclge_dbg_bios_common_reg[0], 14 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg), 15 .offset = HCLGE_DBG_DFX_BIOS_OFFSET, 16 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } }, 17 { .reg_type = "ssu", 18 .dfx_msg = &hclge_dbg_ssu_reg_0[0], 19 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0), 20 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET, 21 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } }, 22 { .reg_type = "ssu", 23 .dfx_msg = &hclge_dbg_ssu_reg_1[0], 24 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1), 25 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET, 26 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } }, 27 { .reg_type = "ssu", 28 .dfx_msg = &hclge_dbg_ssu_reg_2[0], 29 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2), 30 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET, 31 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } }, 32 { .reg_type = "igu egu", 33 .dfx_msg = &hclge_dbg_igu_egu_reg[0], 34 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg), 35 .offset = HCLGE_DBG_DFX_IGU_OFFSET, 36 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } }, 37 { .reg_type = "rpu", 38 .dfx_msg = &hclge_dbg_rpu_reg_0[0], 39 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0), 40 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET, 41 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } }, 42 { .reg_type = "rpu", 43 .dfx_msg = &hclge_dbg_rpu_reg_1[0], 44 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1), 45 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET, 46 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } }, 47 { .reg_type = "ncsi", 48 .dfx_msg = &hclge_dbg_ncsi_reg[0], 49 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg), 50 .offset = HCLGE_DBG_DFX_NCSI_OFFSET, 51 .cmd = HCLGE_OPC_DFX_NCSI_REG } }, 52 { .reg_type = "rtc", 53 .dfx_msg = &hclge_dbg_rtc_reg[0], 54 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg), 55 .offset = HCLGE_DBG_DFX_RTC_OFFSET, 56 .cmd = HCLGE_OPC_DFX_RTC_REG } }, 57 { .reg_type = "ppp", 58 .dfx_msg = &hclge_dbg_ppp_reg[0], 59 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg), 60 .offset = HCLGE_DBG_DFX_PPP_OFFSET, 61 .cmd = HCLGE_OPC_DFX_PPP_REG } }, 62 { .reg_type = "rcb", 63 .dfx_msg = &hclge_dbg_rcb_reg[0], 64 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg), 65 .offset = HCLGE_DBG_DFX_RCB_OFFSET, 66 .cmd = HCLGE_OPC_DFX_RCB_REG } }, 67 { .reg_type = "tqp", 68 .dfx_msg = &hclge_dbg_tqp_reg[0], 69 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg), 70 .offset = HCLGE_DBG_DFX_TQP_OFFSET, 71 .cmd = HCLGE_OPC_DFX_TQP_REG } }, 72 }; 73 74 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset) 75 { 76 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; 77 int entries_per_desc; 78 int index; 79 int ret; 80 81 ret = hclge_query_bd_num_cmd_send(hdev, desc); 82 if (ret) { 83 dev_err(&hdev->pdev->dev, 84 "get dfx bdnum fail, ret = %d\n", ret); 85 return ret; 86 } 87 88 entries_per_desc = ARRAY_SIZE(desc[0].data); 89 index = offset % entries_per_desc; 90 return le32_to_cpu(desc[offset / entries_per_desc].data[index]); 91 } 92 93 static int hclge_dbg_cmd_send(struct hclge_dev *hdev, 94 struct hclge_desc *desc_src, 95 int index, int bd_num, 96 enum hclge_opcode_type cmd) 97 { 98 struct hclge_desc *desc = desc_src; 99 int ret, i; 100 101 hclge_cmd_setup_basic_desc(desc, cmd, true); 102 desc->data[0] = cpu_to_le32(index); 103 104 for (i = 1; i < bd_num; i++) { 105 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 106 desc++; 107 hclge_cmd_setup_basic_desc(desc, cmd, true); 108 } 109 110 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); 111 if (ret) 112 dev_err(&hdev->pdev->dev, 113 "cmd(0x%x) send fail, ret = %d\n", cmd, ret); 114 return ret; 115 } 116 117 static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, 118 struct hclge_dbg_reg_type_info *reg_info, 119 const char *cmd_buf) 120 { 121 #define IDX_OFFSET 1 122 123 const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET]; 124 struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; 125 struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; 126 struct hclge_desc *desc_src; 127 struct hclge_desc *desc; 128 int entries_per_desc; 129 int bd_num, buf_len; 130 int index = 0; 131 int min_num; 132 int ret, i; 133 134 if (*s) { 135 ret = kstrtouint(s, 0, &index); 136 index = (ret != 0) ? 0 : index; 137 } 138 139 bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset); 140 if (bd_num <= 0) { 141 dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n", 142 reg_msg->offset, bd_num); 143 return; 144 } 145 146 buf_len = sizeof(struct hclge_desc) * bd_num; 147 desc_src = kzalloc(buf_len, GFP_KERNEL); 148 if (!desc_src) 149 return; 150 151 desc = desc_src; 152 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd); 153 if (ret) { 154 kfree(desc_src); 155 return; 156 } 157 158 entries_per_desc = ARRAY_SIZE(desc->data); 159 min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num); 160 161 desc = desc_src; 162 for (i = 0; i < min_num; i++) { 163 if (i > 0 && (i % entries_per_desc) == 0) 164 desc++; 165 if (dfx_message->flag) 166 dev_info(&hdev->pdev->dev, "%s: 0x%x\n", 167 dfx_message->message, 168 le32_to_cpu(desc->data[i % entries_per_desc])); 169 170 dfx_message++; 171 } 172 173 kfree(desc_src); 174 } 175 176 static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev) 177 { 178 struct hclge_config_mac_mode_cmd *req; 179 struct hclge_desc desc; 180 u32 loop_en; 181 int ret; 182 183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 184 185 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 186 if (ret) { 187 dev_err(&hdev->pdev->dev, 188 "failed to dump mac enable status, ret = %d\n", ret); 189 return; 190 } 191 192 req = (struct hclge_config_mac_mode_cmd *)desc.data; 193 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 194 195 dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n", 196 hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B)); 197 dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n", 198 hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B)); 199 dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n", 200 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B)); 201 dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n", 202 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B)); 203 dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n", 204 hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B)); 205 dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n", 206 hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B)); 207 dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n", 208 hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B)); 209 dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n", 210 hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B)); 211 dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n", 212 hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B)); 213 dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n", 214 hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B)); 215 dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n", 216 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B)); 217 dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n", 218 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B)); 219 dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n", 220 hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B)); 221 dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n", 222 hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B)); 223 } 224 225 static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev) 226 { 227 struct hclge_config_max_frm_size_cmd *req; 228 struct hclge_desc desc; 229 int ret; 230 231 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true); 232 233 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 234 if (ret) { 235 dev_err(&hdev->pdev->dev, 236 "failed to dump mac frame size, ret = %d\n", ret); 237 return; 238 } 239 240 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 241 242 dev_info(&hdev->pdev->dev, "max_frame_size: %u\n", 243 le16_to_cpu(req->max_frm_size)); 244 dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size); 245 } 246 247 static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev) 248 { 249 #define HCLGE_MAC_SPEED_SHIFT 0 250 #define HCLGE_MAC_SPEED_MASK GENMASK(5, 0) 251 #define HCLGE_MAC_DUPLEX_SHIFT 7 252 253 struct hclge_config_mac_speed_dup_cmd *req; 254 struct hclge_desc desc; 255 int ret; 256 257 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true); 258 259 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 260 if (ret) { 261 dev_err(&hdev->pdev->dev, 262 "failed to dump mac speed duplex, ret = %d\n", ret); 263 return; 264 } 265 266 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 267 268 dev_info(&hdev->pdev->dev, "speed: %#lx\n", 269 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK, 270 HCLGE_MAC_SPEED_SHIFT)); 271 dev_info(&hdev->pdev->dev, "duplex: %#x\n", 272 hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT)); 273 } 274 275 static void hclge_dbg_dump_mac(struct hclge_dev *hdev) 276 { 277 hclge_dbg_dump_mac_enable_status(hdev); 278 279 hclge_dbg_dump_mac_frame_size(hdev); 280 281 hclge_dbg_dump_mac_speed_duplex(hdev); 282 } 283 284 static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf) 285 { 286 struct device *dev = &hdev->pdev->dev; 287 struct hclge_dbg_bitmap_cmd *bitmap; 288 enum hclge_opcode_type cmd; 289 int rq_id, pri_id, qset_id; 290 int port_id, nq_id, pg_id; 291 struct hclge_desc desc[2]; 292 293 int cnt, ret; 294 295 cnt = sscanf(cmd_buf, "%i %i %i %i %i %i", 296 &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id); 297 if (cnt != 6) { 298 dev_err(&hdev->pdev->dev, 299 "dump dcb: bad command parameter, cnt=%d\n", cnt); 300 return; 301 } 302 303 cmd = HCLGE_OPC_QSET_DFX_STS; 304 ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd); 305 if (ret) 306 goto err_dcb_cmd_send; 307 308 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 309 dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0); 310 dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1); 311 dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2); 312 dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3); 313 314 cmd = HCLGE_OPC_PRI_DFX_STS; 315 ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd); 316 if (ret) 317 goto err_dcb_cmd_send; 318 319 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 320 dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0); 321 dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1); 322 dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2); 323 324 cmd = HCLGE_OPC_PG_DFX_STS; 325 ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd); 326 if (ret) 327 goto err_dcb_cmd_send; 328 329 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 330 dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0); 331 dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1); 332 dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2); 333 334 cmd = HCLGE_OPC_PORT_DFX_STS; 335 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 336 if (ret) 337 goto err_dcb_cmd_send; 338 339 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 340 dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0); 341 dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1); 342 343 cmd = HCLGE_OPC_SCH_NQ_CNT; 344 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd); 345 if (ret) 346 goto err_dcb_cmd_send; 347 348 dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); 349 350 cmd = HCLGE_OPC_SCH_RQ_CNT; 351 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd); 352 if (ret) 353 goto err_dcb_cmd_send; 354 355 dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); 356 357 cmd = HCLGE_OPC_TM_INTERNAL_STS; 358 ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd); 359 if (ret) 360 goto err_dcb_cmd_send; 361 362 dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1])); 363 dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2])); 364 dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", 365 le32_to_cpu(desc[0].data[3])); 366 dev_info(dev, "tx_private_waterline: 0x%x\n", 367 le32_to_cpu(desc[0].data[4])); 368 dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5])); 369 dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0])); 370 dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1])); 371 372 cmd = HCLGE_OPC_TM_INTERNAL_CNT; 373 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 374 if (ret) 375 goto err_dcb_cmd_send; 376 377 dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1])); 378 dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2])); 379 380 cmd = HCLGE_OPC_TM_INTERNAL_STS_1; 381 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 382 if (ret) 383 goto err_dcb_cmd_send; 384 385 dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1])); 386 dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2])); 387 dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3])); 388 dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", 389 le32_to_cpu(desc[0].data[4])); 390 dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", 391 le32_to_cpu(desc[0].data[5])); 392 return; 393 394 err_dcb_cmd_send: 395 dev_err(&hdev->pdev->dev, 396 "failed to dump dcb dfx, cmd = %#x, ret = %d\n", 397 cmd, ret); 398 } 399 400 static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf) 401 { 402 struct hclge_dbg_reg_type_info *reg_info; 403 bool has_dump = false; 404 int i; 405 406 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { 407 reg_info = &hclge_dbg_reg_info[i]; 408 if (!strncmp(cmd_buf, reg_info->reg_type, 409 strlen(reg_info->reg_type))) { 410 hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf); 411 has_dump = true; 412 } 413 } 414 415 if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) { 416 hclge_dbg_dump_mac(hdev); 417 has_dump = true; 418 } 419 420 if (strncmp(cmd_buf, "dcb", 3) == 0) { 421 hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]); 422 has_dump = true; 423 } 424 425 if (!has_dump) { 426 dev_info(&hdev->pdev->dev, "unknown command\n"); 427 return; 428 } 429 } 430 431 static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index, 432 char *title_buf, char *true_buf, 433 char *false_buf) 434 { 435 if (flag) 436 dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n", 437 title_buf, index, true_buf, 438 hdev->tm_info.pg_info[0].tc_dwrr[index]); 439 else 440 dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index, 441 false_buf); 442 } 443 444 static void hclge_dbg_dump_tc(struct hclge_dev *hdev) 445 { 446 struct hclge_ets_tc_weight_cmd *ets_weight; 447 struct hclge_desc desc; 448 int i, ret; 449 450 if (!hnae3_dev_dcb_supported(hdev)) { 451 dev_info(&hdev->pdev->dev, 452 "Only DCB-supported dev supports tc\n"); 453 return; 454 } 455 456 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); 457 458 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 459 if (ret) { 460 dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret); 461 return; 462 } 463 464 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; 465 466 dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n", 467 hdev->tm_info.num_tc); 468 dev_info(&hdev->pdev->dev, "weight_offset: %u\n", 469 ets_weight->weight_offset); 470 471 for (i = 0; i < HNAE3_MAX_TC; i++) 472 hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i, 473 "tc", "no sp mode", "sp mode"); 474 } 475 476 static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) 477 { 478 struct hclge_port_shapping_cmd *port_shap_cfg_cmd; 479 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 480 struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd; 481 enum hclge_opcode_type cmd; 482 struct hclge_desc desc; 483 int ret; 484 485 cmd = HCLGE_OPC_TM_PG_C_SHAPPING; 486 hclge_cmd_setup_basic_desc(&desc, cmd, true); 487 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 488 if (ret) 489 goto err_tm_pg_cmd_send; 490 491 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 492 dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 493 dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n", 494 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); 495 496 cmd = HCLGE_OPC_TM_PG_P_SHAPPING; 497 hclge_cmd_setup_basic_desc(&desc, cmd, true); 498 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 499 if (ret) 500 goto err_tm_pg_cmd_send; 501 502 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 503 dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 504 dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", 505 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); 506 507 cmd = HCLGE_OPC_TM_PORT_SHAPPING; 508 hclge_cmd_setup_basic_desc(&desc, cmd, true); 509 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 510 if (ret) 511 goto err_tm_pg_cmd_send; 512 513 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; 514 dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", 515 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para)); 516 517 cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; 518 hclge_cmd_setup_basic_desc(&desc, cmd, true); 519 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 520 if (ret) 521 goto err_tm_pg_cmd_send; 522 523 dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", 524 le32_to_cpu(desc.data[0])); 525 526 cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG; 527 hclge_cmd_setup_basic_desc(&desc, cmd, true); 528 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 529 if (ret) 530 goto err_tm_pg_cmd_send; 531 532 dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", 533 le32_to_cpu(desc.data[0])); 534 535 cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; 536 hclge_cmd_setup_basic_desc(&desc, cmd, true); 537 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 538 if (ret) 539 goto err_tm_pg_cmd_send; 540 541 dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", 542 le32_to_cpu(desc.data[0])); 543 544 if (!hnae3_dev_dcb_supported(hdev)) { 545 dev_info(&hdev->pdev->dev, 546 "Only DCB-supported dev supports tm mapping\n"); 547 return; 548 } 549 550 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 551 hclge_cmd_setup_basic_desc(&desc, cmd, true); 552 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 553 if (ret) 554 goto err_tm_pg_cmd_send; 555 556 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 557 dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n", 558 bp_to_qs_map_cmd->tc_id); 559 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n", 560 bp_to_qs_map_cmd->qs_group_id); 561 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", 562 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map)); 563 return; 564 565 err_tm_pg_cmd_send: 566 dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n", 567 cmd, ret); 568 } 569 570 static void hclge_dbg_dump_tm(struct hclge_dev *hdev) 571 { 572 struct hclge_priority_weight_cmd *priority_weight; 573 struct hclge_pg_to_pri_link_cmd *pg_to_pri_map; 574 struct hclge_qs_to_pri_link_cmd *qs_to_pri_map; 575 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 576 struct hclge_pri_shapping_cmd *shap_cfg_cmd; 577 struct hclge_pg_weight_cmd *pg_weight; 578 struct hclge_qs_weight_cmd *qs_weight; 579 enum hclge_opcode_type cmd; 580 struct hclge_desc desc; 581 int ret; 582 583 cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK; 584 hclge_cmd_setup_basic_desc(&desc, cmd, true); 585 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 586 if (ret) 587 goto err_tm_cmd_send; 588 589 pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data; 590 dev_info(&hdev->pdev->dev, "dump tm\n"); 591 dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n", 592 pg_to_pri_map->pg_id); 593 dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n", 594 pg_to_pri_map->pri_bit_map); 595 596 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 597 hclge_cmd_setup_basic_desc(&desc, cmd, true); 598 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 599 if (ret) 600 goto err_tm_cmd_send; 601 602 qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 603 dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n", 604 le16_to_cpu(qs_to_pri_map->qs_id)); 605 dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n", 606 qs_to_pri_map->priority); 607 dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n", 608 qs_to_pri_map->link_vld); 609 610 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 611 hclge_cmd_setup_basic_desc(&desc, cmd, true); 612 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 613 if (ret) 614 goto err_tm_cmd_send; 615 616 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 617 dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", 618 le16_to_cpu(nq_to_qs_map->nq_id)); 619 dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n", 620 le16_to_cpu(nq_to_qs_map->qset_id)); 621 622 cmd = HCLGE_OPC_TM_PG_WEIGHT; 623 hclge_cmd_setup_basic_desc(&desc, cmd, true); 624 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 625 if (ret) 626 goto err_tm_cmd_send; 627 628 pg_weight = (struct hclge_pg_weight_cmd *)desc.data; 629 dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id); 630 dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr); 631 632 cmd = HCLGE_OPC_TM_QS_WEIGHT; 633 hclge_cmd_setup_basic_desc(&desc, cmd, true); 634 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 635 if (ret) 636 goto err_tm_cmd_send; 637 638 qs_weight = (struct hclge_qs_weight_cmd *)desc.data; 639 dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", 640 le16_to_cpu(qs_weight->qs_id)); 641 dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr); 642 643 cmd = HCLGE_OPC_TM_PRI_WEIGHT; 644 hclge_cmd_setup_basic_desc(&desc, cmd, true); 645 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 646 if (ret) 647 goto err_tm_cmd_send; 648 649 priority_weight = (struct hclge_priority_weight_cmd *)desc.data; 650 dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id); 651 dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr); 652 653 cmd = HCLGE_OPC_TM_PRI_C_SHAPPING; 654 hclge_cmd_setup_basic_desc(&desc, cmd, true); 655 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 656 if (ret) 657 goto err_tm_cmd_send; 658 659 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 660 dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); 661 dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", 662 le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); 663 664 cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; 665 hclge_cmd_setup_basic_desc(&desc, cmd, true); 666 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 667 if (ret) 668 goto err_tm_cmd_send; 669 670 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 671 dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); 672 dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", 673 le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); 674 675 hclge_dbg_dump_tm_pg(hdev); 676 677 return; 678 679 err_tm_cmd_send: 680 dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n", 681 cmd, ret); 682 } 683 684 static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, 685 const char *cmd_buf) 686 { 687 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 688 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 689 struct hclge_qs_to_pri_link_cmd *map; 690 struct hclge_tqp_tx_queue_tc_cmd *tc; 691 enum hclge_opcode_type cmd; 692 struct hclge_desc desc; 693 int queue_id, group_id; 694 u32 qset_mapping[32]; 695 int tc_id, qset_id; 696 int pri_id, ret; 697 u32 i; 698 699 ret = kstrtouint(cmd_buf, 0, &queue_id); 700 queue_id = (ret != 0) ? 0 : queue_id; 701 702 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 703 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 704 hclge_cmd_setup_basic_desc(&desc, cmd, true); 705 nq_to_qs_map->nq_id = cpu_to_le16(queue_id); 706 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 707 if (ret) 708 goto err_tm_map_cmd_send; 709 qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF; 710 711 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 712 map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 713 hclge_cmd_setup_basic_desc(&desc, cmd, true); 714 map->qs_id = cpu_to_le16(qset_id); 715 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 716 if (ret) 717 goto err_tm_map_cmd_send; 718 pri_id = map->priority; 719 720 cmd = HCLGE_OPC_TQP_TX_QUEUE_TC; 721 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; 722 hclge_cmd_setup_basic_desc(&desc, cmd, true); 723 tc->queue_id = cpu_to_le16(queue_id); 724 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 725 if (ret) 726 goto err_tm_map_cmd_send; 727 tc_id = tc->tc_id & 0x7; 728 729 dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n"); 730 dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n", 731 queue_id, qset_id, pri_id, tc_id); 732 733 if (!hnae3_dev_dcb_supported(hdev)) { 734 dev_info(&hdev->pdev->dev, 735 "Only DCB-supported dev supports tm mapping\n"); 736 return; 737 } 738 739 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 740 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 741 for (group_id = 0; group_id < 32; group_id++) { 742 hclge_cmd_setup_basic_desc(&desc, cmd, true); 743 bp_to_qs_map_cmd->tc_id = tc_id; 744 bp_to_qs_map_cmd->qs_group_id = group_id; 745 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 746 if (ret) 747 goto err_tm_map_cmd_send; 748 749 qset_mapping[group_id] = 750 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map); 751 } 752 753 dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n"); 754 755 i = 0; 756 for (group_id = 0; group_id < 4; group_id++) { 757 dev_info(&hdev->pdev->dev, 758 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", 759 group_id * 256, qset_mapping[(u32)(i + 7)], 760 qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)], 761 qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)], 762 qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)], 763 qset_mapping[i]); 764 i += 8; 765 } 766 767 return; 768 769 err_tm_map_cmd_send: 770 dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n", 771 cmd, ret); 772 } 773 774 static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) 775 { 776 struct hclge_cfg_pause_param_cmd *pause_param; 777 struct hclge_desc desc; 778 int ret; 779 780 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); 781 782 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 783 if (ret) { 784 dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n", 785 ret); 786 return; 787 } 788 789 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; 790 dev_info(&hdev->pdev->dev, "dump qos pause cfg\n"); 791 dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n", 792 pause_param->pause_trans_gap); 793 dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n", 794 le16_to_cpu(pause_param->pause_trans_time)); 795 } 796 797 static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) 798 { 799 struct hclge_qos_pri_map_cmd *pri_map; 800 struct hclge_desc desc; 801 int ret; 802 803 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); 804 805 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 806 if (ret) { 807 dev_err(&hdev->pdev->dev, 808 "dump qos pri map fail, ret = %d\n", ret); 809 return; 810 } 811 812 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; 813 dev_info(&hdev->pdev->dev, "dump qos pri map\n"); 814 dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri); 815 dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc); 816 dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc); 817 dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc); 818 dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc); 819 dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc); 820 dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc); 821 dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc); 822 dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc); 823 } 824 825 static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) 826 { 827 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; 828 struct hclge_rx_priv_buff_cmd *rx_buf_cmd; 829 struct hclge_rx_priv_wl_buf *rx_priv_wl; 830 struct hclge_rx_com_wl *rx_packet_cnt; 831 struct hclge_rx_com_thrd *rx_com_thrd; 832 struct hclge_rx_com_wl *rx_com_wl; 833 enum hclge_opcode_type cmd; 834 struct hclge_desc desc[2]; 835 int i, ret; 836 837 cmd = HCLGE_OPC_TX_BUFF_ALLOC; 838 hclge_cmd_setup_basic_desc(desc, cmd, true); 839 ret = hclge_cmd_send(&hdev->hw, desc, 1); 840 if (ret) 841 goto err_qos_cmd_send; 842 843 dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); 844 845 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; 846 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 847 dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, 848 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); 849 850 cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; 851 hclge_cmd_setup_basic_desc(desc, cmd, true); 852 ret = hclge_cmd_send(&hdev->hw, desc, 1); 853 if (ret) 854 goto err_qos_cmd_send; 855 856 dev_info(&hdev->pdev->dev, "\n"); 857 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; 858 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 859 dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, 860 le16_to_cpu(rx_buf_cmd->buf_num[i])); 861 862 dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", 863 le16_to_cpu(rx_buf_cmd->shared_buf)); 864 865 cmd = HCLGE_OPC_RX_COM_WL_ALLOC; 866 hclge_cmd_setup_basic_desc(desc, cmd, true); 867 ret = hclge_cmd_send(&hdev->hw, desc, 1); 868 if (ret) 869 goto err_qos_cmd_send; 870 871 rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; 872 dev_info(&hdev->pdev->dev, "\n"); 873 dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", 874 le16_to_cpu(rx_com_wl->com_wl.high), 875 le16_to_cpu(rx_com_wl->com_wl.low)); 876 877 cmd = HCLGE_OPC_RX_GBL_PKT_CNT; 878 hclge_cmd_setup_basic_desc(desc, cmd, true); 879 ret = hclge_cmd_send(&hdev->hw, desc, 1); 880 if (ret) 881 goto err_qos_cmd_send; 882 883 rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; 884 dev_info(&hdev->pdev->dev, 885 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", 886 le16_to_cpu(rx_packet_cnt->com_wl.high), 887 le16_to_cpu(rx_packet_cnt->com_wl.low)); 888 dev_info(&hdev->pdev->dev, "\n"); 889 890 if (!hnae3_dev_dcb_supported(hdev)) { 891 dev_info(&hdev->pdev->dev, 892 "Only DCB-supported dev supports rx priv wl\n"); 893 return; 894 } 895 cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; 896 hclge_cmd_setup_basic_desc(&desc[0], cmd, true); 897 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 898 hclge_cmd_setup_basic_desc(&desc[1], cmd, true); 899 ret = hclge_cmd_send(&hdev->hw, desc, 2); 900 if (ret) 901 goto err_qos_cmd_send; 902 903 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; 904 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 905 dev_info(&hdev->pdev->dev, 906 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, 907 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 908 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 909 910 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; 911 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 912 dev_info(&hdev->pdev->dev, 913 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", 914 i + HCLGE_TC_NUM_ONE_DESC, 915 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 916 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 917 918 cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; 919 hclge_cmd_setup_basic_desc(&desc[0], cmd, true); 920 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 921 hclge_cmd_setup_basic_desc(&desc[1], cmd, true); 922 ret = hclge_cmd_send(&hdev->hw, desc, 2); 923 if (ret) 924 goto err_qos_cmd_send; 925 926 dev_info(&hdev->pdev->dev, "\n"); 927 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; 928 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 929 dev_info(&hdev->pdev->dev, 930 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, 931 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 932 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 933 934 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; 935 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 936 dev_info(&hdev->pdev->dev, 937 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", 938 i + HCLGE_TC_NUM_ONE_DESC, 939 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 940 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 941 return; 942 943 err_qos_cmd_send: 944 dev_err(&hdev->pdev->dev, 945 "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret); 946 } 947 948 static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) 949 { 950 struct hclge_mac_ethertype_idx_rd_cmd *req0; 951 char printf_buf[HCLGE_DBG_BUF_LEN]; 952 struct hclge_desc desc; 953 u32 msg_egress_port; 954 int ret, i; 955 956 dev_info(&hdev->pdev->dev, "mng tab:\n"); 957 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 958 strncat(printf_buf, 959 "entry|mac_addr |mask|ether|mask|vlan|mask", 960 HCLGE_DBG_BUF_LEN - 1); 961 strncat(printf_buf + strlen(printf_buf), 962 "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n", 963 HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1); 964 965 dev_info(&hdev->pdev->dev, "%s", printf_buf); 966 967 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { 968 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, 969 true); 970 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; 971 req0->index = cpu_to_le16(i); 972 973 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 974 if (ret) { 975 dev_err(&hdev->pdev->dev, 976 "call hclge_cmd_send fail, ret = %d\n", ret); 977 return; 978 } 979 980 if (!req0->resp_code) 981 continue; 982 983 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 984 snprintf(printf_buf, HCLGE_DBG_BUF_LEN, 985 "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", 986 le16_to_cpu(req0->index), 987 req0->mac_addr[0], req0->mac_addr[1], 988 req0->mac_addr[2], req0->mac_addr[3], 989 req0->mac_addr[4], req0->mac_addr[5]); 990 991 snprintf(printf_buf + strlen(printf_buf), 992 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 993 "%x |%04x |%x |%04x|%x |%02x |%02x |", 994 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), 995 le16_to_cpu(req0->ethter_type), 996 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), 997 le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG, 998 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), 999 req0->i_port_bitmap, req0->i_port_direction); 1000 1001 msg_egress_port = le16_to_cpu(req0->egress_port); 1002 snprintf(printf_buf + strlen(printf_buf), 1003 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 1004 "%x |%x |%02x |%04x|%x\n", 1005 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B), 1006 msg_egress_port & HCLGE_DBG_MNG_PF_ID, 1007 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, 1008 le16_to_cpu(req0->egress_queue), 1009 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B)); 1010 1011 dev_info(&hdev->pdev->dev, "%s", printf_buf); 1012 } 1013 } 1014 1015 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, 1016 bool sel_x, u32 loc) 1017 { 1018 struct hclge_fd_tcam_config_1_cmd *req1; 1019 struct hclge_fd_tcam_config_2_cmd *req2; 1020 struct hclge_fd_tcam_config_3_cmd *req3; 1021 struct hclge_desc desc[3]; 1022 int ret, i; 1023 u32 *req; 1024 1025 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); 1026 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1027 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); 1028 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1029 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); 1030 1031 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 1032 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 1033 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 1034 1035 req1->stage = stage; 1036 req1->xy_sel = sel_x ? 1 : 0; 1037 req1->index = cpu_to_le32(loc); 1038 1039 ret = hclge_cmd_send(&hdev->hw, desc, 3); 1040 if (ret) 1041 return ret; 1042 1043 dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", 1044 sel_x ? "x" : "y", loc); 1045 1046 /* tcam_data0 ~ tcam_data1 */ 1047 req = (u32 *)req1->tcam_data; 1048 for (i = 0; i < 2; i++) 1049 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 1050 1051 /* tcam_data2 ~ tcam_data7 */ 1052 req = (u32 *)req2->tcam_data; 1053 for (i = 0; i < 6; i++) 1054 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 1055 1056 /* tcam_data8 ~ tcam_data12 */ 1057 req = (u32 *)req3->tcam_data; 1058 for (i = 0; i < 5; i++) 1059 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 1060 1061 return ret; 1062 } 1063 1064 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) 1065 { 1066 struct hclge_fd_rule *rule; 1067 struct hlist_node *node; 1068 int cnt = 0; 1069 1070 spin_lock_bh(&hdev->fd_rule_lock); 1071 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 1072 rule_locs[cnt] = rule->location; 1073 cnt++; 1074 } 1075 spin_unlock_bh(&hdev->fd_rule_lock); 1076 1077 if (cnt != hdev->hclge_fd_rule_num) 1078 return -EINVAL; 1079 1080 return cnt; 1081 } 1082 1083 static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) 1084 { 1085 int i, ret, rule_cnt; 1086 u16 *rule_locs; 1087 1088 if (!hnae3_dev_fd_supported(hdev)) { 1089 dev_err(&hdev->pdev->dev, 1090 "Only FD-supported dev supports dump fd tcam\n"); 1091 return; 1092 } 1093 1094 if (!hdev->hclge_fd_rule_num || 1095 !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 1096 return; 1097 1098 rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 1099 sizeof(u16), GFP_KERNEL); 1100 if (!rule_locs) 1101 return; 1102 1103 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs); 1104 if (rule_cnt <= 0) { 1105 dev_err(&hdev->pdev->dev, 1106 "failed to get rule number, ret = %d\n", rule_cnt); 1107 kfree(rule_locs); 1108 return; 1109 } 1110 1111 for (i = 0; i < rule_cnt; i++) { 1112 ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]); 1113 if (ret) { 1114 dev_err(&hdev->pdev->dev, 1115 "failed to get fd tcam key x, ret = %d\n", ret); 1116 kfree(rule_locs); 1117 return; 1118 } 1119 1120 ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]); 1121 if (ret) { 1122 dev_err(&hdev->pdev->dev, 1123 "failed to get fd tcam key y, ret = %d\n", ret); 1124 kfree(rule_locs); 1125 return; 1126 } 1127 } 1128 1129 kfree(rule_locs); 1130 } 1131 1132 void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) 1133 { 1134 dev_info(&hdev->pdev->dev, "PF reset count: %u\n", 1135 hdev->rst_stats.pf_rst_cnt); 1136 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1137 hdev->rst_stats.flr_rst_cnt); 1138 dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n", 1139 hdev->rst_stats.global_rst_cnt); 1140 dev_info(&hdev->pdev->dev, "IMP reset count: %u\n", 1141 hdev->rst_stats.imp_rst_cnt); 1142 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1143 hdev->rst_stats.reset_done_cnt); 1144 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1145 hdev->rst_stats.hw_reset_done_cnt); 1146 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1147 hdev->rst_stats.reset_cnt); 1148 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1149 hdev->rst_stats.reset_fail_cnt); 1150 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1151 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); 1152 dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n", 1153 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); 1154 dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n", 1155 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); 1156 dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n", 1157 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 1158 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1159 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); 1160 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1161 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); 1162 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1163 } 1164 1165 static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev) 1166 { 1167 dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n", 1168 hdev->last_serv_processed); 1169 dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n", 1170 hdev->serv_processed_cnt); 1171 } 1172 1173 static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) 1174 { 1175 struct hclge_desc *desc_src, *desc_tmp; 1176 struct hclge_get_m7_bd_cmd *req; 1177 struct hclge_desc desc; 1178 u32 bd_num, buf_len; 1179 int ret, i; 1180 1181 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true); 1182 1183 req = (struct hclge_get_m7_bd_cmd *)desc.data; 1184 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1185 if (ret) { 1186 dev_err(&hdev->pdev->dev, 1187 "get firmware statistics bd number failed, ret = %d\n", 1188 ret); 1189 return; 1190 } 1191 1192 bd_num = le32_to_cpu(req->bd_num); 1193 1194 buf_len = sizeof(struct hclge_desc) * bd_num; 1195 desc_src = kzalloc(buf_len, GFP_KERNEL); 1196 if (!desc_src) 1197 return; 1198 1199 desc_tmp = desc_src; 1200 ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num, 1201 HCLGE_OPC_M7_STATS_INFO); 1202 if (ret) { 1203 kfree(desc_src); 1204 dev_err(&hdev->pdev->dev, 1205 "get firmware statistics failed, ret = %d\n", ret); 1206 return; 1207 } 1208 1209 for (i = 0; i < bd_num; i++) { 1210 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", 1211 le32_to_cpu(desc_tmp->data[0]), 1212 le32_to_cpu(desc_tmp->data[1]), 1213 le32_to_cpu(desc_tmp->data[2])); 1214 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", 1215 le32_to_cpu(desc_tmp->data[3]), 1216 le32_to_cpu(desc_tmp->data[4]), 1217 le32_to_cpu(desc_tmp->data[5])); 1218 1219 desc_tmp++; 1220 } 1221 1222 kfree(desc_src); 1223 } 1224 1225 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 1226 1227 static void hclge_ncl_config_data_print(struct hclge_dev *hdev, 1228 struct hclge_desc *desc, int *offset, 1229 int *length) 1230 { 1231 #define HCLGE_CMD_DATA_NUM 6 1232 1233 int i; 1234 int j; 1235 1236 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { 1237 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { 1238 if (i == 0 && j == 0) 1239 continue; 1240 1241 dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", 1242 *offset, 1243 le32_to_cpu(desc[i].data[j])); 1244 *offset += sizeof(u32); 1245 *length -= sizeof(u32); 1246 if (*length <= 0) 1247 return; 1248 } 1249 } 1250 } 1251 1252 /* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file 1253 * @hdev: pointer to struct hclge_dev 1254 * @cmd_buf: string that contains offset and length 1255 */ 1256 static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, 1257 const char *cmd_buf) 1258 { 1259 #define HCLGE_MAX_NCL_CONFIG_OFFSET 4096 1260 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4) 1261 #define HCLGE_NCL_CONFIG_PARAM_NUM 2 1262 1263 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; 1264 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; 1265 int offset; 1266 int length; 1267 int data0; 1268 int ret; 1269 1270 ret = sscanf(cmd_buf, "%x %x", &offset, &length); 1271 if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) { 1272 dev_err(&hdev->pdev->dev, 1273 "Too few parameters, num = %d.\n", ret); 1274 return; 1275 } 1276 1277 if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET || 1278 length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) { 1279 dev_err(&hdev->pdev->dev, 1280 "Invalid input, offset = %d, length = %d.\n", 1281 offset, length); 1282 return; 1283 } 1284 1285 dev_info(&hdev->pdev->dev, "offset | data\n"); 1286 1287 while (length > 0) { 1288 data0 = offset; 1289 if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD) 1290 data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16; 1291 else 1292 data0 |= length << 16; 1293 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, 1294 HCLGE_OPC_QUERY_NCL_CONFIG); 1295 if (ret) 1296 return; 1297 1298 hclge_ncl_config_data_print(hdev, desc, &offset, &length); 1299 } 1300 } 1301 1302 static void hclge_dbg_dump_loopback(struct hclge_dev *hdev, 1303 const char *cmd_buf) 1304 { 1305 struct phy_device *phydev = hdev->hw.mac.phydev; 1306 struct hclge_config_mac_mode_cmd *req_app; 1307 struct hclge_serdes_lb_cmd *req_serdes; 1308 struct hclge_desc desc; 1309 u8 loopback_en; 1310 int ret; 1311 1312 req_app = (struct hclge_config_mac_mode_cmd *)desc.data; 1313 req_serdes = (struct hclge_serdes_lb_cmd *)desc.data; 1314 1315 dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id); 1316 1317 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 1318 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1319 if (ret) { 1320 dev_err(&hdev->pdev->dev, 1321 "failed to dump app loopback status, ret = %d\n", ret); 1322 return; 1323 } 1324 1325 loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en), 1326 HCLGE_MAC_APP_LP_B); 1327 dev_info(&hdev->pdev->dev, "app loopback: %s\n", 1328 loopback_en ? "on" : "off"); 1329 1330 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true); 1331 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1332 if (ret) { 1333 dev_err(&hdev->pdev->dev, 1334 "failed to dump serdes loopback status, ret = %d\n", 1335 ret); 1336 return; 1337 } 1338 1339 loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 1340 dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n", 1341 loopback_en ? "on" : "off"); 1342 1343 loopback_en = req_serdes->enable & 1344 HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 1345 dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n", 1346 loopback_en ? "on" : "off"); 1347 1348 if (phydev) 1349 dev_info(&hdev->pdev->dev, "phy loopback: %s\n", 1350 phydev->loopback_enabled ? "on" : "off"); 1351 } 1352 1353 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt 1354 * @hdev: pointer to struct hclge_dev 1355 */ 1356 static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) 1357 { 1358 #define HCLGE_BILLION_NANO_SECONDS 1000000000 1359 1360 struct hclge_mac_tnl_stats stats; 1361 unsigned long rem_nsec; 1362 1363 dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n"); 1364 1365 while (kfifo_get(&hdev->mac_tnl_log, &stats)) { 1366 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); 1367 dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n", 1368 (unsigned long)stats.time, rem_nsec / 1000, 1369 stats.status); 1370 } 1371 } 1372 1373 static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid) 1374 { 1375 struct hclge_qs_shapping_cmd *shap_cfg_cmd; 1376 u8 ir_u, ir_b, ir_s, bs_b, bs_s; 1377 struct hclge_desc desc; 1378 u32 shapping_para; 1379 int ret; 1380 1381 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); 1382 1383 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; 1384 shap_cfg_cmd->qs_id = cpu_to_le16(qsid); 1385 1386 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1387 if (ret) { 1388 dev_err(&hdev->pdev->dev, 1389 "qs%u failed to get tx_rate, ret=%d\n", 1390 qsid, ret); 1391 return; 1392 } 1393 1394 shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); 1395 ir_b = hclge_tm_get_field(shapping_para, IR_B); 1396 ir_u = hclge_tm_get_field(shapping_para, IR_U); 1397 ir_s = hclge_tm_get_field(shapping_para, IR_S); 1398 bs_b = hclge_tm_get_field(shapping_para, BS_B); 1399 bs_s = hclge_tm_get_field(shapping_para, BS_S); 1400 1401 dev_info(&hdev->pdev->dev, 1402 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n", 1403 qsid, ir_b, ir_u, ir_s, bs_b, bs_s); 1404 } 1405 1406 static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev) 1407 { 1408 struct hnae3_knic_private_info *kinfo; 1409 struct hclge_vport *vport; 1410 int vport_id, i; 1411 1412 for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) { 1413 vport = &hdev->vport[vport_id]; 1414 kinfo = &vport->nic.kinfo; 1415 1416 dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id); 1417 1418 for (i = 0; i < kinfo->num_tc; i++) { 1419 u16 qsid = vport->qs_offset + i; 1420 1421 hclge_dbg_dump_qs_shaper_single(hdev, qsid); 1422 } 1423 } 1424 } 1425 1426 static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev, 1427 const char *cmd_buf) 1428 { 1429 #define HCLGE_MAX_QSET_NUM 1024 1430 1431 u16 qsid; 1432 int ret; 1433 1434 ret = kstrtou16(cmd_buf, 0, &qsid); 1435 if (ret) { 1436 hclge_dbg_dump_qs_shaper_all(hdev); 1437 return; 1438 } 1439 1440 if (qsid >= HCLGE_MAX_QSET_NUM) { 1441 dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n", 1442 qsid); 1443 return; 1444 } 1445 1446 hclge_dbg_dump_qs_shaper_single(hdev, qsid); 1447 } 1448 1449 static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf, 1450 bool is_unicast) 1451 { 1452 struct hclge_mac_node *mac_node, *tmp; 1453 struct hclge_vport *vport; 1454 struct list_head *list; 1455 u32 func_id; 1456 int ret; 1457 1458 ret = kstrtouint(cmd_buf, 0, &func_id); 1459 if (ret < 0) { 1460 dev_err(&hdev->pdev->dev, 1461 "dump mac list: bad command string, ret = %d\n", ret); 1462 return -EINVAL; 1463 } 1464 1465 if (func_id >= hdev->num_alloc_vport) { 1466 dev_err(&hdev->pdev->dev, 1467 "function id(%u) is out of range(0-%u)\n", func_id, 1468 hdev->num_alloc_vport - 1); 1469 return -EINVAL; 1470 } 1471 1472 vport = &hdev->vport[func_id]; 1473 1474 list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list; 1475 1476 dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n", 1477 func_id, is_unicast ? "uc" : "mc"); 1478 dev_info(&hdev->pdev->dev, "mac address state\n"); 1479 1480 spin_lock_bh(&vport->mac_list_lock); 1481 1482 list_for_each_entry_safe(mac_node, tmp, list, node) { 1483 dev_info(&hdev->pdev->dev, "%pM %d\n", 1484 mac_node->mac_addr, mac_node->state); 1485 } 1486 1487 spin_unlock_bh(&vport->mac_list_lock); 1488 1489 return 0; 1490 } 1491 1492 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) 1493 { 1494 #define DUMP_REG "dump reg" 1495 #define DUMP_TM_MAP "dump tm map" 1496 #define DUMP_LOOPBACK "dump loopback" 1497 1498 struct hclge_vport *vport = hclge_get_vport(handle); 1499 struct hclge_dev *hdev = vport->back; 1500 1501 if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) { 1502 hclge_dbg_fd_tcam(hdev); 1503 } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { 1504 hclge_dbg_dump_tc(hdev); 1505 } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) { 1506 hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]); 1507 } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { 1508 hclge_dbg_dump_tm(hdev); 1509 } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) { 1510 hclge_dbg_dump_qos_pause_cfg(hdev); 1511 } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) { 1512 hclge_dbg_dump_qos_pri_map(hdev); 1513 } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) { 1514 hclge_dbg_dump_qos_buf_cfg(hdev); 1515 } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { 1516 hclge_dbg_dump_mng_table(hdev); 1517 } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) { 1518 hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]); 1519 } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) { 1520 hclge_dbg_dump_rst_info(hdev); 1521 } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) { 1522 hclge_dbg_dump_serv_info(hdev); 1523 } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) { 1524 hclge_dbg_get_m7_stats_info(hdev); 1525 } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) { 1526 hclge_dbg_dump_ncl_config(hdev, 1527 &cmd_buf[sizeof("dump ncl_config")]); 1528 } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) { 1529 hclge_dbg_dump_mac_tnl_status(hdev); 1530 } else if (strncmp(cmd_buf, DUMP_LOOPBACK, 1531 strlen(DUMP_LOOPBACK)) == 0) { 1532 hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]); 1533 } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) { 1534 hclge_dbg_dump_qs_shaper(hdev, 1535 &cmd_buf[sizeof("dump qs shaper")]); 1536 } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) { 1537 hclge_dbg_dump_mac_list(hdev, 1538 &cmd_buf[sizeof("dump uc mac list")], 1539 true); 1540 } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) { 1541 hclge_dbg_dump_mac_list(hdev, 1542 &cmd_buf[sizeof("dump mc mac list")], 1543 false); 1544 } else { 1545 dev_info(&hdev->pdev->dev, "unknown command\n"); 1546 return -EINVAL; 1547 } 1548 1549 return 0; 1550 } 1551