1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Copyright (c) 2018-2019 Hisilicon Limited. */ 3 4 #include <linux/device.h> 5 6 #include "hclge_debugfs.h" 7 #include "hclge_main.h" 8 #include "hclge_tm.h" 9 #include "hnae3.h" 10 11 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { 12 { .reg_type = "bios common", 13 .dfx_msg = &hclge_dbg_bios_common_reg[0], 14 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg), 15 .offset = HCLGE_DBG_DFX_BIOS_OFFSET, 16 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } }, 17 { .reg_type = "ssu", 18 .dfx_msg = &hclge_dbg_ssu_reg_0[0], 19 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0), 20 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET, 21 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } }, 22 { .reg_type = "ssu", 23 .dfx_msg = &hclge_dbg_ssu_reg_1[0], 24 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1), 25 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET, 26 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } }, 27 { .reg_type = "ssu", 28 .dfx_msg = &hclge_dbg_ssu_reg_2[0], 29 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2), 30 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET, 31 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } }, 32 { .reg_type = "igu egu", 33 .dfx_msg = &hclge_dbg_igu_egu_reg[0], 34 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg), 35 .offset = HCLGE_DBG_DFX_IGU_OFFSET, 36 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } }, 37 { .reg_type = "rpu", 38 .dfx_msg = &hclge_dbg_rpu_reg_0[0], 39 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0), 40 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET, 41 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } }, 42 { .reg_type = "rpu", 43 .dfx_msg = &hclge_dbg_rpu_reg_1[0], 44 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1), 45 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET, 46 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } }, 47 { .reg_type = "ncsi", 48 .dfx_msg = &hclge_dbg_ncsi_reg[0], 49 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg), 50 .offset = HCLGE_DBG_DFX_NCSI_OFFSET, 51 .cmd = HCLGE_OPC_DFX_NCSI_REG } }, 52 { .reg_type = "rtc", 53 .dfx_msg = &hclge_dbg_rtc_reg[0], 54 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg), 55 .offset = HCLGE_DBG_DFX_RTC_OFFSET, 56 .cmd = HCLGE_OPC_DFX_RTC_REG } }, 57 { .reg_type = "ppp", 58 .dfx_msg = &hclge_dbg_ppp_reg[0], 59 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg), 60 .offset = HCLGE_DBG_DFX_PPP_OFFSET, 61 .cmd = HCLGE_OPC_DFX_PPP_REG } }, 62 { .reg_type = "rcb", 63 .dfx_msg = &hclge_dbg_rcb_reg[0], 64 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg), 65 .offset = HCLGE_DBG_DFX_RCB_OFFSET, 66 .cmd = HCLGE_OPC_DFX_RCB_REG } }, 67 { .reg_type = "tqp", 68 .dfx_msg = &hclge_dbg_tqp_reg[0], 69 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg), 70 .offset = HCLGE_DBG_DFX_TQP_OFFSET, 71 .cmd = HCLGE_OPC_DFX_TQP_REG } }, 72 }; 73 74 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset) 75 { 76 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; 77 int entries_per_desc; 78 int index; 79 int ret; 80 81 ret = hclge_query_bd_num_cmd_send(hdev, desc); 82 if (ret) { 83 dev_err(&hdev->pdev->dev, 84 "get dfx bdnum fail, ret = %d\n", ret); 85 return ret; 86 } 87 88 entries_per_desc = ARRAY_SIZE(desc[0].data); 89 index = offset % entries_per_desc; 90 return le32_to_cpu(desc[offset / entries_per_desc].data[index]); 91 } 92 93 static int hclge_dbg_cmd_send(struct hclge_dev *hdev, 94 struct hclge_desc *desc_src, 95 int index, int bd_num, 96 enum hclge_opcode_type cmd) 97 { 98 struct hclge_desc *desc = desc_src; 99 int ret, i; 100 101 hclge_cmd_setup_basic_desc(desc, cmd, true); 102 desc->data[0] = cpu_to_le32(index); 103 104 for (i = 1; i < bd_num; i++) { 105 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 106 desc++; 107 hclge_cmd_setup_basic_desc(desc, cmd, true); 108 } 109 110 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); 111 if (ret) 112 dev_err(&hdev->pdev->dev, 113 "cmd(0x%x) send fail, ret = %d\n", cmd, ret); 114 return ret; 115 } 116 117 static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, 118 const struct hclge_dbg_reg_type_info *reg_info, 119 const char *cmd_buf) 120 { 121 #define IDX_OFFSET 1 122 123 const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET]; 124 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; 125 const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; 126 struct hclge_desc *desc_src; 127 struct hclge_desc *desc; 128 int entries_per_desc; 129 int bd_num, buf_len; 130 int index = 0; 131 int min_num; 132 int ret, i; 133 134 if (*s) { 135 ret = kstrtouint(s, 0, &index); 136 index = (ret != 0) ? 0 : index; 137 } 138 139 bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset); 140 if (bd_num <= 0) { 141 dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n", 142 reg_msg->offset, bd_num); 143 return; 144 } 145 146 buf_len = sizeof(struct hclge_desc) * bd_num; 147 desc_src = kzalloc(buf_len, GFP_KERNEL); 148 if (!desc_src) 149 return; 150 151 desc = desc_src; 152 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd); 153 if (ret) { 154 kfree(desc_src); 155 return; 156 } 157 158 entries_per_desc = ARRAY_SIZE(desc->data); 159 min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num); 160 161 desc = desc_src; 162 for (i = 0; i < min_num; i++) { 163 if (i > 0 && (i % entries_per_desc) == 0) 164 desc++; 165 if (dfx_message->flag) 166 dev_info(&hdev->pdev->dev, "%s: 0x%x\n", 167 dfx_message->message, 168 le32_to_cpu(desc->data[i % entries_per_desc])); 169 170 dfx_message++; 171 } 172 173 kfree(desc_src); 174 } 175 176 static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev) 177 { 178 struct hclge_config_mac_mode_cmd *req; 179 struct hclge_desc desc; 180 u32 loop_en; 181 int ret; 182 183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 184 185 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 186 if (ret) { 187 dev_err(&hdev->pdev->dev, 188 "failed to dump mac enable status, ret = %d\n", ret); 189 return; 190 } 191 192 req = (struct hclge_config_mac_mode_cmd *)desc.data; 193 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 194 195 dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n", 196 hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B)); 197 dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n", 198 hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B)); 199 dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n", 200 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B)); 201 dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n", 202 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B)); 203 dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n", 204 hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B)); 205 dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n", 206 hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B)); 207 dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n", 208 hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B)); 209 dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n", 210 hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B)); 211 dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n", 212 hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B)); 213 dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n", 214 hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B)); 215 dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n", 216 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B)); 217 dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n", 218 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B)); 219 dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n", 220 hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B)); 221 dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n", 222 hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B)); 223 } 224 225 static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev) 226 { 227 struct hclge_config_max_frm_size_cmd *req; 228 struct hclge_desc desc; 229 int ret; 230 231 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true); 232 233 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 234 if (ret) { 235 dev_err(&hdev->pdev->dev, 236 "failed to dump mac frame size, ret = %d\n", ret); 237 return; 238 } 239 240 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 241 242 dev_info(&hdev->pdev->dev, "max_frame_size: %u\n", 243 le16_to_cpu(req->max_frm_size)); 244 dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size); 245 } 246 247 static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev) 248 { 249 #define HCLGE_MAC_SPEED_SHIFT 0 250 #define HCLGE_MAC_SPEED_MASK GENMASK(5, 0) 251 #define HCLGE_MAC_DUPLEX_SHIFT 7 252 253 struct hclge_config_mac_speed_dup_cmd *req; 254 struct hclge_desc desc; 255 int ret; 256 257 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true); 258 259 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 260 if (ret) { 261 dev_err(&hdev->pdev->dev, 262 "failed to dump mac speed duplex, ret = %d\n", ret); 263 return; 264 } 265 266 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 267 268 dev_info(&hdev->pdev->dev, "speed: %#lx\n", 269 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK, 270 HCLGE_MAC_SPEED_SHIFT)); 271 dev_info(&hdev->pdev->dev, "duplex: %#x\n", 272 hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT)); 273 } 274 275 static void hclge_dbg_dump_mac(struct hclge_dev *hdev) 276 { 277 hclge_dbg_dump_mac_enable_status(hdev); 278 279 hclge_dbg_dump_mac_frame_size(hdev); 280 281 hclge_dbg_dump_mac_speed_duplex(hdev); 282 } 283 284 static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf) 285 { 286 struct device *dev = &hdev->pdev->dev; 287 struct hclge_dbg_bitmap_cmd *bitmap; 288 enum hclge_opcode_type cmd; 289 int rq_id, pri_id, qset_id; 290 int port_id, nq_id, pg_id; 291 struct hclge_desc desc[2]; 292 293 int cnt, ret; 294 295 cnt = sscanf(cmd_buf, "%i %i %i %i %i %i", 296 &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id); 297 if (cnt != 6) { 298 dev_err(&hdev->pdev->dev, 299 "dump dcb: bad command parameter, cnt=%d\n", cnt); 300 return; 301 } 302 303 cmd = HCLGE_OPC_QSET_DFX_STS; 304 ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd); 305 if (ret) 306 goto err_dcb_cmd_send; 307 308 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 309 dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0); 310 dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1); 311 dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2); 312 dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3); 313 314 cmd = HCLGE_OPC_PRI_DFX_STS; 315 ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd); 316 if (ret) 317 goto err_dcb_cmd_send; 318 319 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 320 dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0); 321 dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1); 322 dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2); 323 324 cmd = HCLGE_OPC_PG_DFX_STS; 325 ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd); 326 if (ret) 327 goto err_dcb_cmd_send; 328 329 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 330 dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0); 331 dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1); 332 dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2); 333 334 cmd = HCLGE_OPC_PORT_DFX_STS; 335 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 336 if (ret) 337 goto err_dcb_cmd_send; 338 339 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 340 dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0); 341 dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1); 342 343 cmd = HCLGE_OPC_SCH_NQ_CNT; 344 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd); 345 if (ret) 346 goto err_dcb_cmd_send; 347 348 dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); 349 350 cmd = HCLGE_OPC_SCH_RQ_CNT; 351 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd); 352 if (ret) 353 goto err_dcb_cmd_send; 354 355 dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); 356 357 cmd = HCLGE_OPC_TM_INTERNAL_STS; 358 ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd); 359 if (ret) 360 goto err_dcb_cmd_send; 361 362 dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1])); 363 dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2])); 364 dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", 365 le32_to_cpu(desc[0].data[3])); 366 dev_info(dev, "tx_private_waterline: 0x%x\n", 367 le32_to_cpu(desc[0].data[4])); 368 dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5])); 369 dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0])); 370 dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1])); 371 372 cmd = HCLGE_OPC_TM_INTERNAL_CNT; 373 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 374 if (ret) 375 goto err_dcb_cmd_send; 376 377 dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1])); 378 dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2])); 379 380 cmd = HCLGE_OPC_TM_INTERNAL_STS_1; 381 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 382 if (ret) 383 goto err_dcb_cmd_send; 384 385 dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1])); 386 dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2])); 387 dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3])); 388 dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", 389 le32_to_cpu(desc[0].data[4])); 390 dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", 391 le32_to_cpu(desc[0].data[5])); 392 return; 393 394 err_dcb_cmd_send: 395 dev_err(&hdev->pdev->dev, 396 "failed to dump dcb dfx, cmd = %#x, ret = %d\n", 397 cmd, ret); 398 } 399 400 static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf) 401 { 402 const struct hclge_dbg_reg_type_info *reg_info; 403 bool has_dump = false; 404 int i; 405 406 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { 407 reg_info = &hclge_dbg_reg_info[i]; 408 if (!strncmp(cmd_buf, reg_info->reg_type, 409 strlen(reg_info->reg_type))) { 410 hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf); 411 has_dump = true; 412 } 413 } 414 415 if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) { 416 hclge_dbg_dump_mac(hdev); 417 has_dump = true; 418 } 419 420 if (strncmp(cmd_buf, "dcb", 3) == 0) { 421 hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]); 422 has_dump = true; 423 } 424 425 if (!has_dump) { 426 dev_info(&hdev->pdev->dev, "unknown command\n"); 427 return; 428 } 429 } 430 431 static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index) 432 { 433 if (flag) 434 dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n", 435 index, hdev->tm_info.pg_info[0].tc_dwrr[index]); 436 else 437 dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index); 438 } 439 440 static void hclge_dbg_dump_tc(struct hclge_dev *hdev) 441 { 442 struct hclge_ets_tc_weight_cmd *ets_weight; 443 struct hclge_desc desc; 444 int i, ret; 445 446 if (!hnae3_dev_dcb_supported(hdev)) { 447 dev_info(&hdev->pdev->dev, 448 "Only DCB-supported dev supports tc\n"); 449 return; 450 } 451 452 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); 453 454 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 455 if (ret) { 456 dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret); 457 return; 458 } 459 460 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; 461 462 dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n", 463 hdev->tm_info.num_tc); 464 dev_info(&hdev->pdev->dev, "weight_offset: %u\n", 465 ets_weight->weight_offset); 466 467 for (i = 0; i < HNAE3_MAX_TC; i++) 468 hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i); 469 } 470 471 static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) 472 { 473 struct hclge_port_shapping_cmd *port_shap_cfg_cmd; 474 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 475 struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd; 476 enum hclge_opcode_type cmd; 477 struct hclge_desc desc; 478 int ret; 479 480 cmd = HCLGE_OPC_TM_PG_C_SHAPPING; 481 hclge_cmd_setup_basic_desc(&desc, cmd, true); 482 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 483 if (ret) 484 goto err_tm_pg_cmd_send; 485 486 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 487 dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 488 dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n", 489 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); 490 491 cmd = HCLGE_OPC_TM_PG_P_SHAPPING; 492 hclge_cmd_setup_basic_desc(&desc, cmd, true); 493 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 494 if (ret) 495 goto err_tm_pg_cmd_send; 496 497 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 498 dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 499 dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", 500 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); 501 dev_info(&hdev->pdev->dev, "PG_P flag: %#x\n", pg_shap_cfg_cmd->flag); 502 dev_info(&hdev->pdev->dev, "PG_P pg_rate: %u(Mbps)\n", 503 le32_to_cpu(pg_shap_cfg_cmd->pg_rate)); 504 505 cmd = HCLGE_OPC_TM_PORT_SHAPPING; 506 hclge_cmd_setup_basic_desc(&desc, cmd, true); 507 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 508 if (ret) 509 goto err_tm_pg_cmd_send; 510 511 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; 512 dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", 513 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para)); 514 dev_info(&hdev->pdev->dev, "PORT flag: %#x\n", port_shap_cfg_cmd->flag); 515 dev_info(&hdev->pdev->dev, "PORT port_rate: %u(Mbps)\n", 516 le32_to_cpu(port_shap_cfg_cmd->port_rate)); 517 518 cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; 519 hclge_cmd_setup_basic_desc(&desc, cmd, true); 520 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 521 if (ret) 522 goto err_tm_pg_cmd_send; 523 524 dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", 525 le32_to_cpu(desc.data[0])); 526 527 cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG; 528 hclge_cmd_setup_basic_desc(&desc, cmd, true); 529 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 530 if (ret) 531 goto err_tm_pg_cmd_send; 532 533 dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", 534 le32_to_cpu(desc.data[0])); 535 536 cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; 537 hclge_cmd_setup_basic_desc(&desc, cmd, true); 538 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 539 if (ret) 540 goto err_tm_pg_cmd_send; 541 542 dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", 543 le32_to_cpu(desc.data[0])); 544 545 if (!hnae3_dev_dcb_supported(hdev)) { 546 dev_info(&hdev->pdev->dev, 547 "Only DCB-supported dev supports tm mapping\n"); 548 return; 549 } 550 551 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 552 hclge_cmd_setup_basic_desc(&desc, cmd, true); 553 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 554 if (ret) 555 goto err_tm_pg_cmd_send; 556 557 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 558 dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n", 559 bp_to_qs_map_cmd->tc_id); 560 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n", 561 bp_to_qs_map_cmd->qs_group_id); 562 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", 563 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map)); 564 return; 565 566 err_tm_pg_cmd_send: 567 dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n", 568 cmd, ret); 569 } 570 571 static void hclge_dbg_dump_tm(struct hclge_dev *hdev) 572 { 573 struct hclge_priority_weight_cmd *priority_weight; 574 struct hclge_pg_to_pri_link_cmd *pg_to_pri_map; 575 struct hclge_qs_to_pri_link_cmd *qs_to_pri_map; 576 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 577 struct hclge_pri_shapping_cmd *shap_cfg_cmd; 578 struct hclge_pg_weight_cmd *pg_weight; 579 struct hclge_qs_weight_cmd *qs_weight; 580 enum hclge_opcode_type cmd; 581 struct hclge_desc desc; 582 int ret; 583 584 cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK; 585 hclge_cmd_setup_basic_desc(&desc, cmd, true); 586 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 587 if (ret) 588 goto err_tm_cmd_send; 589 590 pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data; 591 dev_info(&hdev->pdev->dev, "dump tm\n"); 592 dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n", 593 pg_to_pri_map->pg_id); 594 dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n", 595 pg_to_pri_map->pri_bit_map); 596 597 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 598 hclge_cmd_setup_basic_desc(&desc, cmd, true); 599 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 600 if (ret) 601 goto err_tm_cmd_send; 602 603 qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 604 dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n", 605 le16_to_cpu(qs_to_pri_map->qs_id)); 606 dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n", 607 qs_to_pri_map->priority); 608 dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n", 609 qs_to_pri_map->link_vld); 610 611 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 612 hclge_cmd_setup_basic_desc(&desc, cmd, true); 613 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 614 if (ret) 615 goto err_tm_cmd_send; 616 617 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 618 dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", 619 le16_to_cpu(nq_to_qs_map->nq_id)); 620 dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n", 621 le16_to_cpu(nq_to_qs_map->qset_id)); 622 623 cmd = HCLGE_OPC_TM_PG_WEIGHT; 624 hclge_cmd_setup_basic_desc(&desc, cmd, true); 625 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 626 if (ret) 627 goto err_tm_cmd_send; 628 629 pg_weight = (struct hclge_pg_weight_cmd *)desc.data; 630 dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id); 631 dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr); 632 633 cmd = HCLGE_OPC_TM_QS_WEIGHT; 634 hclge_cmd_setup_basic_desc(&desc, cmd, true); 635 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 636 if (ret) 637 goto err_tm_cmd_send; 638 639 qs_weight = (struct hclge_qs_weight_cmd *)desc.data; 640 dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", 641 le16_to_cpu(qs_weight->qs_id)); 642 dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr); 643 644 cmd = HCLGE_OPC_TM_PRI_WEIGHT; 645 hclge_cmd_setup_basic_desc(&desc, cmd, true); 646 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 647 if (ret) 648 goto err_tm_cmd_send; 649 650 priority_weight = (struct hclge_priority_weight_cmd *)desc.data; 651 dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id); 652 dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr); 653 654 cmd = HCLGE_OPC_TM_PRI_C_SHAPPING; 655 hclge_cmd_setup_basic_desc(&desc, cmd, true); 656 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 657 if (ret) 658 goto err_tm_cmd_send; 659 660 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 661 dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); 662 dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", 663 le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); 664 dev_info(&hdev->pdev->dev, "PRI_C flag: %#x\n", shap_cfg_cmd->flag); 665 dev_info(&hdev->pdev->dev, "PRI_C pri_rate: %u(Mbps)\n", 666 le32_to_cpu(shap_cfg_cmd->pri_rate)); 667 668 cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; 669 hclge_cmd_setup_basic_desc(&desc, cmd, true); 670 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 671 if (ret) 672 goto err_tm_cmd_send; 673 674 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 675 dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); 676 dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", 677 le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); 678 dev_info(&hdev->pdev->dev, "PRI_P flag: %#x\n", shap_cfg_cmd->flag); 679 dev_info(&hdev->pdev->dev, "PRI_P pri_rate: %u(Mbps)\n", 680 le32_to_cpu(shap_cfg_cmd->pri_rate)); 681 682 hclge_dbg_dump_tm_pg(hdev); 683 684 return; 685 686 err_tm_cmd_send: 687 dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n", 688 cmd, ret); 689 } 690 691 static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, 692 const char *cmd_buf) 693 { 694 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 695 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 696 u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM]; 697 struct hclge_qs_to_pri_link_cmd *map; 698 struct hclge_tqp_tx_queue_tc_cmd *tc; 699 u16 group_id, queue_id, qset_id; 700 enum hclge_opcode_type cmd; 701 u8 grp_num, pri_id, tc_id; 702 struct hclge_desc desc; 703 u16 qs_id_l; 704 u16 qs_id_h; 705 int ret; 706 u32 i; 707 708 ret = kstrtou16(cmd_buf, 0, &queue_id); 709 queue_id = (ret != 0) ? 0 : queue_id; 710 711 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 712 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 713 hclge_cmd_setup_basic_desc(&desc, cmd, true); 714 nq_to_qs_map->nq_id = cpu_to_le16(queue_id); 715 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 716 if (ret) 717 goto err_tm_map_cmd_send; 718 qset_id = le16_to_cpu(nq_to_qs_map->qset_id); 719 720 /* convert qset_id to the following format, drop the vld bit 721 * | qs_id_h | vld | qs_id_l | 722 * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 | 723 * \ \ / / 724 * \ \ / / 725 * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 | 726 */ 727 qs_id_l = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_L_MSK, 728 HCLGE_TM_QS_ID_L_S); 729 qs_id_h = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_H_EXT_MSK, 730 HCLGE_TM_QS_ID_H_EXT_S); 731 qset_id = 0; 732 hnae3_set_field(qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S, 733 qs_id_l); 734 hnae3_set_field(qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S, 735 qs_id_h); 736 737 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 738 map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 739 hclge_cmd_setup_basic_desc(&desc, cmd, true); 740 map->qs_id = cpu_to_le16(qset_id); 741 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 742 if (ret) 743 goto err_tm_map_cmd_send; 744 pri_id = map->priority; 745 746 cmd = HCLGE_OPC_TQP_TX_QUEUE_TC; 747 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; 748 hclge_cmd_setup_basic_desc(&desc, cmd, true); 749 tc->queue_id = cpu_to_le16(queue_id); 750 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 751 if (ret) 752 goto err_tm_map_cmd_send; 753 tc_id = tc->tc_id & 0x7; 754 755 dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n"); 756 dev_info(&hdev->pdev->dev, "%04u | %04u | %02u | %02u\n", 757 queue_id, qset_id, pri_id, tc_id); 758 759 if (!hnae3_dev_dcb_supported(hdev)) { 760 dev_info(&hdev->pdev->dev, 761 "Only DCB-supported dev supports tm mapping\n"); 762 return; 763 } 764 765 grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ? 766 HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM; 767 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 768 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 769 for (group_id = 0; group_id < grp_num; group_id++) { 770 hclge_cmd_setup_basic_desc(&desc, cmd, true); 771 bp_to_qs_map_cmd->tc_id = tc_id; 772 bp_to_qs_map_cmd->qs_group_id = group_id; 773 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 774 if (ret) 775 goto err_tm_map_cmd_send; 776 777 qset_mapping[group_id] = 778 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map); 779 } 780 781 dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n"); 782 783 i = 0; 784 for (group_id = 0; group_id < grp_num / 8; group_id++) { 785 dev_info(&hdev->pdev->dev, 786 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", 787 group_id * 256, qset_mapping[(u32)(i + 7)], 788 qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)], 789 qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)], 790 qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)], 791 qset_mapping[i]); 792 i += 8; 793 } 794 795 return; 796 797 err_tm_map_cmd_send: 798 dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n", 799 cmd, ret); 800 } 801 802 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len) 803 { 804 struct hclge_tm_nodes_cmd *nodes; 805 struct hclge_desc desc; 806 int pos = 0; 807 int ret; 808 809 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); 810 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 811 if (ret) { 812 dev_err(&hdev->pdev->dev, 813 "failed to dump tm nodes, ret = %d\n", ret); 814 return ret; 815 } 816 817 nodes = (struct hclge_tm_nodes_cmd *)desc.data; 818 819 pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n"); 820 pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n", 821 nodes->pg_base_id, nodes->pg_num); 822 pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n", 823 nodes->pri_base_id, nodes->pri_num); 824 pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n", 825 le16_to_cpu(nodes->qset_base_id), 826 le16_to_cpu(nodes->qset_num)); 827 pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n", 828 le16_to_cpu(nodes->queue_base_id), 829 le16_to_cpu(nodes->queue_num)); 830 831 return 0; 832 } 833 834 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) 835 { 836 struct hclge_pri_shaper_para c_shaper_para; 837 struct hclge_pri_shaper_para p_shaper_para; 838 u8 pri_num, sch_mode, weight; 839 char *sch_mode_str; 840 int pos = 0; 841 int ret; 842 u8 i; 843 844 ret = hclge_tm_get_pri_num(hdev, &pri_num); 845 if (ret) 846 return ret; 847 848 pos += scnprintf(buf + pos, len - pos, 849 "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B "); 850 pos += scnprintf(buf + pos, len - pos, 851 "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U "); 852 pos += scnprintf(buf + pos, len - pos, 853 "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n"); 854 855 for (i = 0; i < pri_num; i++) { 856 ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode); 857 if (ret) 858 return ret; 859 860 ret = hclge_tm_get_pri_weight(hdev, i, &weight); 861 if (ret) 862 return ret; 863 864 ret = hclge_tm_get_pri_shaper(hdev, i, 865 HCLGE_OPC_TM_PRI_C_SHAPPING, 866 &c_shaper_para); 867 if (ret) 868 return ret; 869 870 ret = hclge_tm_get_pri_shaper(hdev, i, 871 HCLGE_OPC_TM_PRI_P_SHAPPING, 872 &p_shaper_para); 873 if (ret) 874 return ret; 875 876 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : 877 "sp"; 878 879 pos += scnprintf(buf + pos, len - pos, 880 "%04u %4s %3u %3u %3u %3u ", 881 i, sch_mode_str, weight, c_shaper_para.ir_b, 882 c_shaper_para.ir_u, c_shaper_para.ir_s); 883 pos += scnprintf(buf + pos, len - pos, 884 "%3u %3u %1u %6u ", 885 c_shaper_para.bs_b, c_shaper_para.bs_s, 886 c_shaper_para.flag, c_shaper_para.rate); 887 pos += scnprintf(buf + pos, len - pos, 888 "%3u %3u %3u %3u %3u ", 889 p_shaper_para.ir_b, p_shaper_para.ir_u, 890 p_shaper_para.ir_s, p_shaper_para.bs_b, 891 p_shaper_para.bs_s); 892 pos += scnprintf(buf + pos, len - pos, "%1u %6u\n", 893 p_shaper_para.flag, p_shaper_para.rate); 894 } 895 896 return 0; 897 } 898 899 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len) 900 { 901 u8 priority, link_vld, sch_mode, weight; 902 char *sch_mode_str; 903 int ret, pos; 904 u16 qset_num; 905 u16 i; 906 907 ret = hclge_tm_get_qset_num(hdev, &qset_num); 908 if (ret) 909 return ret; 910 911 pos = scnprintf(buf, len, "ID MAP_PRI LINK_VLD MODE DWRR\n"); 912 913 for (i = 0; i < qset_num; i++) { 914 ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld); 915 if (ret) 916 return ret; 917 918 ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode); 919 if (ret) 920 return ret; 921 922 ret = hclge_tm_get_qset_weight(hdev, i, &weight); 923 if (ret) 924 return ret; 925 926 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : 927 "sp"; 928 pos += scnprintf(buf + pos, len - pos, 929 "%04u %4u %1u %4s %3u\n", 930 i, priority, link_vld, sch_mode_str, weight); 931 } 932 933 return 0; 934 } 935 936 static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) 937 { 938 struct hclge_cfg_pause_param_cmd *pause_param; 939 struct hclge_desc desc; 940 int ret; 941 942 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); 943 944 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 945 if (ret) { 946 dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n", 947 ret); 948 return; 949 } 950 951 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; 952 dev_info(&hdev->pdev->dev, "dump qos pause cfg\n"); 953 dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n", 954 pause_param->pause_trans_gap); 955 dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n", 956 le16_to_cpu(pause_param->pause_trans_time)); 957 } 958 959 static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) 960 { 961 struct hclge_qos_pri_map_cmd *pri_map; 962 struct hclge_desc desc; 963 int ret; 964 965 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); 966 967 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 968 if (ret) { 969 dev_err(&hdev->pdev->dev, 970 "dump qos pri map fail, ret = %d\n", ret); 971 return; 972 } 973 974 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; 975 dev_info(&hdev->pdev->dev, "dump qos pri map\n"); 976 dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri); 977 dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc); 978 dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc); 979 dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc); 980 dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc); 981 dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc); 982 dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc); 983 dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc); 984 dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc); 985 } 986 987 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev) 988 { 989 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; 990 struct hclge_desc desc; 991 int i, ret; 992 993 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true); 994 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 995 if (ret) 996 return ret; 997 998 dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); 999 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1000 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1001 dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, 1002 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); 1003 1004 return 0; 1005 } 1006 1007 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev) 1008 { 1009 struct hclge_rx_priv_buff_cmd *rx_buf_cmd; 1010 struct hclge_desc desc; 1011 int i, ret; 1012 1013 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true); 1014 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1015 if (ret) 1016 return ret; 1017 1018 dev_info(&hdev->pdev->dev, "\n"); 1019 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data; 1020 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1021 dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, 1022 le16_to_cpu(rx_buf_cmd->buf_num[i])); 1023 1024 dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", 1025 le16_to_cpu(rx_buf_cmd->shared_buf)); 1026 1027 return 0; 1028 } 1029 1030 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev) 1031 { 1032 struct hclge_rx_com_wl *rx_com_wl; 1033 struct hclge_desc desc; 1034 int ret; 1035 1036 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true); 1037 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1038 if (ret) 1039 return ret; 1040 1041 rx_com_wl = (struct hclge_rx_com_wl *)desc.data; 1042 dev_info(&hdev->pdev->dev, "\n"); 1043 dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", 1044 le16_to_cpu(rx_com_wl->com_wl.high), 1045 le16_to_cpu(rx_com_wl->com_wl.low)); 1046 1047 return 0; 1048 } 1049 1050 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev) 1051 { 1052 struct hclge_rx_com_wl *rx_packet_cnt; 1053 struct hclge_desc desc; 1054 int ret; 1055 1056 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true); 1057 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1058 if (ret) 1059 return ret; 1060 1061 rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data; 1062 dev_info(&hdev->pdev->dev, 1063 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", 1064 le16_to_cpu(rx_packet_cnt->com_wl.high), 1065 le16_to_cpu(rx_packet_cnt->com_wl.low)); 1066 1067 return 0; 1068 } 1069 1070 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev) 1071 { 1072 struct hclge_rx_priv_wl_buf *rx_priv_wl; 1073 struct hclge_desc desc[2]; 1074 int i, ret; 1075 1076 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); 1077 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1078 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); 1079 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1080 if (ret) 1081 return ret; 1082 1083 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; 1084 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 1085 dev_info(&hdev->pdev->dev, 1086 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, 1087 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 1088 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 1089 1090 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; 1091 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 1092 dev_info(&hdev->pdev->dev, 1093 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", 1094 i + HCLGE_TC_NUM_ONE_DESC, 1095 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 1096 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 1097 1098 return 0; 1099 } 1100 1101 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev) 1102 { 1103 struct hclge_rx_com_thrd *rx_com_thrd; 1104 struct hclge_desc desc[2]; 1105 int i, ret; 1106 1107 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true); 1108 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1109 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true); 1110 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1111 if (ret) 1112 return ret; 1113 1114 dev_info(&hdev->pdev->dev, "\n"); 1115 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; 1116 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 1117 dev_info(&hdev->pdev->dev, 1118 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, 1119 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 1120 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 1121 1122 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; 1123 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 1124 dev_info(&hdev->pdev->dev, 1125 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", 1126 i + HCLGE_TC_NUM_ONE_DESC, 1127 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 1128 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 1129 1130 return 0; 1131 } 1132 1133 static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) 1134 { 1135 enum hclge_opcode_type cmd; 1136 int ret; 1137 1138 cmd = HCLGE_OPC_TX_BUFF_ALLOC; 1139 ret = hclge_dbg_dump_tx_buf_cfg(hdev); 1140 if (ret) 1141 goto err_qos_cmd_send; 1142 1143 cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; 1144 ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev); 1145 if (ret) 1146 goto err_qos_cmd_send; 1147 1148 cmd = HCLGE_OPC_RX_COM_WL_ALLOC; 1149 ret = hclge_dbg_dump_rx_common_wl_cfg(hdev); 1150 if (ret) 1151 goto err_qos_cmd_send; 1152 1153 cmd = HCLGE_OPC_RX_GBL_PKT_CNT; 1154 ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev); 1155 if (ret) 1156 goto err_qos_cmd_send; 1157 1158 dev_info(&hdev->pdev->dev, "\n"); 1159 if (!hnae3_dev_dcb_supported(hdev)) { 1160 dev_info(&hdev->pdev->dev, 1161 "Only DCB-supported dev supports rx priv wl\n"); 1162 return; 1163 } 1164 1165 cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; 1166 ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev); 1167 if (ret) 1168 goto err_qos_cmd_send; 1169 1170 cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; 1171 ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev); 1172 if (ret) 1173 goto err_qos_cmd_send; 1174 1175 return; 1176 1177 err_qos_cmd_send: 1178 dev_err(&hdev->pdev->dev, 1179 "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret); 1180 } 1181 1182 static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) 1183 { 1184 struct hclge_mac_ethertype_idx_rd_cmd *req0; 1185 char printf_buf[HCLGE_DBG_BUF_LEN]; 1186 struct hclge_desc desc; 1187 u32 msg_egress_port; 1188 int ret, i; 1189 1190 dev_info(&hdev->pdev->dev, "mng tab:\n"); 1191 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 1192 strncat(printf_buf, 1193 "entry|mac_addr |mask|ether|mask|vlan|mask", 1194 HCLGE_DBG_BUF_LEN - 1); 1195 strncat(printf_buf + strlen(printf_buf), 1196 "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n", 1197 HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1); 1198 1199 dev_info(&hdev->pdev->dev, "%s", printf_buf); 1200 1201 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { 1202 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, 1203 true); 1204 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; 1205 req0->index = cpu_to_le16(i); 1206 1207 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1208 if (ret) { 1209 dev_err(&hdev->pdev->dev, 1210 "call hclge_cmd_send fail, ret = %d\n", ret); 1211 return; 1212 } 1213 1214 if (!req0->resp_code) 1215 continue; 1216 1217 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 1218 snprintf(printf_buf, HCLGE_DBG_BUF_LEN, 1219 "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", 1220 le16_to_cpu(req0->index), 1221 req0->mac_addr[0], req0->mac_addr[1], 1222 req0->mac_addr[2], req0->mac_addr[3], 1223 req0->mac_addr[4], req0->mac_addr[5]); 1224 1225 snprintf(printf_buf + strlen(printf_buf), 1226 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 1227 "%x |%04x |%x |%04x|%x |%02x |%02x |", 1228 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), 1229 le16_to_cpu(req0->ethter_type), 1230 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), 1231 le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG, 1232 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), 1233 req0->i_port_bitmap, req0->i_port_direction); 1234 1235 msg_egress_port = le16_to_cpu(req0->egress_port); 1236 snprintf(printf_buf + strlen(printf_buf), 1237 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 1238 "%x |%x |%02x |%04x|%x\n", 1239 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B), 1240 msg_egress_port & HCLGE_DBG_MNG_PF_ID, 1241 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, 1242 le16_to_cpu(req0->egress_queue), 1243 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B)); 1244 1245 dev_info(&hdev->pdev->dev, "%s", printf_buf); 1246 } 1247 } 1248 1249 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, 1250 bool sel_x, u32 loc) 1251 { 1252 struct hclge_fd_tcam_config_1_cmd *req1; 1253 struct hclge_fd_tcam_config_2_cmd *req2; 1254 struct hclge_fd_tcam_config_3_cmd *req3; 1255 struct hclge_desc desc[3]; 1256 int ret, i; 1257 u32 *req; 1258 1259 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); 1260 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1261 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); 1262 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1263 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); 1264 1265 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 1266 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 1267 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 1268 1269 req1->stage = stage; 1270 req1->xy_sel = sel_x ? 1 : 0; 1271 req1->index = cpu_to_le32(loc); 1272 1273 ret = hclge_cmd_send(&hdev->hw, desc, 3); 1274 if (ret) 1275 return ret; 1276 1277 dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", 1278 sel_x ? "x" : "y", loc); 1279 1280 /* tcam_data0 ~ tcam_data1 */ 1281 req = (u32 *)req1->tcam_data; 1282 for (i = 0; i < 2; i++) 1283 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 1284 1285 /* tcam_data2 ~ tcam_data7 */ 1286 req = (u32 *)req2->tcam_data; 1287 for (i = 0; i < 6; i++) 1288 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 1289 1290 /* tcam_data8 ~ tcam_data12 */ 1291 req = (u32 *)req3->tcam_data; 1292 for (i = 0; i < 5; i++) 1293 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 1294 1295 return ret; 1296 } 1297 1298 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) 1299 { 1300 struct hclge_fd_rule *rule; 1301 struct hlist_node *node; 1302 int cnt = 0; 1303 1304 spin_lock_bh(&hdev->fd_rule_lock); 1305 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 1306 rule_locs[cnt] = rule->location; 1307 cnt++; 1308 } 1309 spin_unlock_bh(&hdev->fd_rule_lock); 1310 1311 if (cnt != hdev->hclge_fd_rule_num) 1312 return -EINVAL; 1313 1314 return cnt; 1315 } 1316 1317 static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) 1318 { 1319 int i, ret, rule_cnt; 1320 u16 *rule_locs; 1321 1322 if (!hnae3_dev_fd_supported(hdev)) { 1323 dev_err(&hdev->pdev->dev, 1324 "Only FD-supported dev supports dump fd tcam\n"); 1325 return; 1326 } 1327 1328 if (!hdev->hclge_fd_rule_num || 1329 !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 1330 return; 1331 1332 rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 1333 sizeof(u16), GFP_KERNEL); 1334 if (!rule_locs) 1335 return; 1336 1337 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs); 1338 if (rule_cnt <= 0) { 1339 dev_err(&hdev->pdev->dev, 1340 "failed to get rule number, ret = %d\n", rule_cnt); 1341 kfree(rule_locs); 1342 return; 1343 } 1344 1345 for (i = 0; i < rule_cnt; i++) { 1346 ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]); 1347 if (ret) { 1348 dev_err(&hdev->pdev->dev, 1349 "failed to get fd tcam key x, ret = %d\n", ret); 1350 kfree(rule_locs); 1351 return; 1352 } 1353 1354 ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]); 1355 if (ret) { 1356 dev_err(&hdev->pdev->dev, 1357 "failed to get fd tcam key y, ret = %d\n", ret); 1358 kfree(rule_locs); 1359 return; 1360 } 1361 } 1362 1363 kfree(rule_locs); 1364 } 1365 1366 void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) 1367 { 1368 dev_info(&hdev->pdev->dev, "PF reset count: %u\n", 1369 hdev->rst_stats.pf_rst_cnt); 1370 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1371 hdev->rst_stats.flr_rst_cnt); 1372 dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n", 1373 hdev->rst_stats.global_rst_cnt); 1374 dev_info(&hdev->pdev->dev, "IMP reset count: %u\n", 1375 hdev->rst_stats.imp_rst_cnt); 1376 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1377 hdev->rst_stats.reset_done_cnt); 1378 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1379 hdev->rst_stats.hw_reset_done_cnt); 1380 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1381 hdev->rst_stats.reset_cnt); 1382 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1383 hdev->rst_stats.reset_fail_cnt); 1384 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1385 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); 1386 dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n", 1387 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); 1388 dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n", 1389 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); 1390 dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n", 1391 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 1392 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1393 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); 1394 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1395 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); 1396 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1397 } 1398 1399 static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev) 1400 { 1401 dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n", 1402 hdev->last_serv_processed); 1403 dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n", 1404 hdev->serv_processed_cnt); 1405 } 1406 1407 static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev) 1408 { 1409 dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi); 1410 dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi); 1411 dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used); 1412 dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left); 1413 } 1414 1415 static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) 1416 { 1417 struct hclge_desc *desc_src, *desc_tmp; 1418 struct hclge_get_m7_bd_cmd *req; 1419 struct hclge_desc desc; 1420 u32 bd_num, buf_len; 1421 int ret, i; 1422 1423 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true); 1424 1425 req = (struct hclge_get_m7_bd_cmd *)desc.data; 1426 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1427 if (ret) { 1428 dev_err(&hdev->pdev->dev, 1429 "get firmware statistics bd number failed, ret = %d\n", 1430 ret); 1431 return; 1432 } 1433 1434 bd_num = le32_to_cpu(req->bd_num); 1435 1436 buf_len = sizeof(struct hclge_desc) * bd_num; 1437 desc_src = kzalloc(buf_len, GFP_KERNEL); 1438 if (!desc_src) 1439 return; 1440 1441 desc_tmp = desc_src; 1442 ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num, 1443 HCLGE_OPC_M7_STATS_INFO); 1444 if (ret) { 1445 kfree(desc_src); 1446 dev_err(&hdev->pdev->dev, 1447 "get firmware statistics failed, ret = %d\n", ret); 1448 return; 1449 } 1450 1451 for (i = 0; i < bd_num; i++) { 1452 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", 1453 le32_to_cpu(desc_tmp->data[0]), 1454 le32_to_cpu(desc_tmp->data[1]), 1455 le32_to_cpu(desc_tmp->data[2])); 1456 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", 1457 le32_to_cpu(desc_tmp->data[3]), 1458 le32_to_cpu(desc_tmp->data[4]), 1459 le32_to_cpu(desc_tmp->data[5])); 1460 1461 desc_tmp++; 1462 } 1463 1464 kfree(desc_src); 1465 } 1466 1467 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 1468 1469 static void hclge_ncl_config_data_print(struct hclge_dev *hdev, 1470 struct hclge_desc *desc, int *offset, 1471 int *length) 1472 { 1473 #define HCLGE_CMD_DATA_NUM 6 1474 1475 int i; 1476 int j; 1477 1478 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { 1479 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { 1480 if (i == 0 && j == 0) 1481 continue; 1482 1483 dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", 1484 *offset, 1485 le32_to_cpu(desc[i].data[j])); 1486 *offset += sizeof(u32); 1487 *length -= sizeof(u32); 1488 if (*length <= 0) 1489 return; 1490 } 1491 } 1492 } 1493 1494 /* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file 1495 * @hdev: pointer to struct hclge_dev 1496 * @cmd_buf: string that contains offset and length 1497 */ 1498 static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, 1499 const char *cmd_buf) 1500 { 1501 #define HCLGE_MAX_NCL_CONFIG_OFFSET 4096 1502 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4) 1503 #define HCLGE_NCL_CONFIG_PARAM_NUM 2 1504 1505 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; 1506 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; 1507 int offset; 1508 int length; 1509 int data0; 1510 int ret; 1511 1512 ret = sscanf(cmd_buf, "%x %x", &offset, &length); 1513 if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) { 1514 dev_err(&hdev->pdev->dev, 1515 "Too few parameters, num = %d.\n", ret); 1516 return; 1517 } 1518 1519 if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET || 1520 length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) { 1521 dev_err(&hdev->pdev->dev, 1522 "Invalid input, offset = %d, length = %d.\n", 1523 offset, length); 1524 return; 1525 } 1526 1527 dev_info(&hdev->pdev->dev, "offset | data\n"); 1528 1529 while (length > 0) { 1530 data0 = offset; 1531 if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD) 1532 data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16; 1533 else 1534 data0 |= length << 16; 1535 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, 1536 HCLGE_OPC_QUERY_NCL_CONFIG); 1537 if (ret) 1538 return; 1539 1540 hclge_ncl_config_data_print(hdev, desc, &offset, &length); 1541 } 1542 } 1543 1544 static void hclge_dbg_dump_loopback(struct hclge_dev *hdev, 1545 const char *cmd_buf) 1546 { 1547 struct phy_device *phydev = hdev->hw.mac.phydev; 1548 struct hclge_config_mac_mode_cmd *req_app; 1549 struct hclge_serdes_lb_cmd *req_serdes; 1550 struct hclge_desc desc; 1551 u8 loopback_en; 1552 int ret; 1553 1554 req_app = (struct hclge_config_mac_mode_cmd *)desc.data; 1555 req_serdes = (struct hclge_serdes_lb_cmd *)desc.data; 1556 1557 dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id); 1558 1559 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 1560 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1561 if (ret) { 1562 dev_err(&hdev->pdev->dev, 1563 "failed to dump app loopback status, ret = %d\n", ret); 1564 return; 1565 } 1566 1567 loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en), 1568 HCLGE_MAC_APP_LP_B); 1569 dev_info(&hdev->pdev->dev, "app loopback: %s\n", 1570 loopback_en ? "on" : "off"); 1571 1572 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true); 1573 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1574 if (ret) { 1575 dev_err(&hdev->pdev->dev, 1576 "failed to dump serdes loopback status, ret = %d\n", 1577 ret); 1578 return; 1579 } 1580 1581 loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 1582 dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n", 1583 loopback_en ? "on" : "off"); 1584 1585 loopback_en = req_serdes->enable & 1586 HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 1587 dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n", 1588 loopback_en ? "on" : "off"); 1589 1590 if (phydev) 1591 dev_info(&hdev->pdev->dev, "phy loopback: %s\n", 1592 phydev->loopback_enabled ? "on" : "off"); 1593 } 1594 1595 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt 1596 * @hdev: pointer to struct hclge_dev 1597 */ 1598 static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) 1599 { 1600 #define HCLGE_BILLION_NANO_SECONDS 1000000000 1601 1602 struct hclge_mac_tnl_stats stats; 1603 unsigned long rem_nsec; 1604 1605 dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n"); 1606 1607 while (kfifo_get(&hdev->mac_tnl_log, &stats)) { 1608 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); 1609 dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n", 1610 (unsigned long)stats.time, rem_nsec / 1000, 1611 stats.status); 1612 } 1613 } 1614 1615 static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid) 1616 { 1617 struct hclge_qs_shapping_cmd *shap_cfg_cmd; 1618 u8 ir_u, ir_b, ir_s, bs_b, bs_s; 1619 struct hclge_desc desc; 1620 u32 shapping_para; 1621 u32 rate; 1622 int ret; 1623 1624 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); 1625 1626 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; 1627 shap_cfg_cmd->qs_id = cpu_to_le16(qsid); 1628 1629 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1630 if (ret) { 1631 dev_err(&hdev->pdev->dev, 1632 "qs%u failed to get tx_rate, ret=%d\n", 1633 qsid, ret); 1634 return; 1635 } 1636 1637 shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); 1638 ir_b = hclge_tm_get_field(shapping_para, IR_B); 1639 ir_u = hclge_tm_get_field(shapping_para, IR_U); 1640 ir_s = hclge_tm_get_field(shapping_para, IR_S); 1641 bs_b = hclge_tm_get_field(shapping_para, BS_B); 1642 bs_s = hclge_tm_get_field(shapping_para, BS_S); 1643 rate = le32_to_cpu(shap_cfg_cmd->qs_rate); 1644 1645 dev_info(&hdev->pdev->dev, 1646 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u, flag:%#x, rate:%u(Mbps)\n", 1647 qsid, ir_b, ir_u, ir_s, bs_b, bs_s, shap_cfg_cmd->flag, rate); 1648 } 1649 1650 static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev) 1651 { 1652 struct hnae3_knic_private_info *kinfo; 1653 struct hclge_vport *vport; 1654 int vport_id, i; 1655 1656 for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) { 1657 vport = &hdev->vport[vport_id]; 1658 kinfo = &vport->nic.kinfo; 1659 1660 dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id); 1661 1662 for (i = 0; i < kinfo->tc_info.num_tc; i++) { 1663 u16 qsid = vport->qs_offset + i; 1664 1665 hclge_dbg_dump_qs_shaper_single(hdev, qsid); 1666 } 1667 } 1668 } 1669 1670 static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev, 1671 const char *cmd_buf) 1672 { 1673 u16 qsid; 1674 int ret; 1675 1676 ret = kstrtou16(cmd_buf, 0, &qsid); 1677 if (ret) { 1678 hclge_dbg_dump_qs_shaper_all(hdev); 1679 return; 1680 } 1681 1682 if (qsid >= hdev->ae_dev->dev_specs.max_qset_num) { 1683 dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-%u]\n", 1684 qsid, hdev->ae_dev->dev_specs.max_qset_num - 1); 1685 return; 1686 } 1687 1688 hclge_dbg_dump_qs_shaper_single(hdev, qsid); 1689 } 1690 1691 static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf, 1692 bool is_unicast) 1693 { 1694 struct hclge_mac_node *mac_node, *tmp; 1695 struct hclge_vport *vport; 1696 struct list_head *list; 1697 u32 func_id; 1698 int ret; 1699 1700 ret = kstrtouint(cmd_buf, 0, &func_id); 1701 if (ret < 0) { 1702 dev_err(&hdev->pdev->dev, 1703 "dump mac list: bad command string, ret = %d\n", ret); 1704 return -EINVAL; 1705 } 1706 1707 if (func_id >= hdev->num_alloc_vport) { 1708 dev_err(&hdev->pdev->dev, 1709 "function id(%u) is out of range(0-%u)\n", func_id, 1710 hdev->num_alloc_vport - 1); 1711 return -EINVAL; 1712 } 1713 1714 vport = &hdev->vport[func_id]; 1715 1716 list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list; 1717 1718 dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n", 1719 func_id, is_unicast ? "uc" : "mc"); 1720 dev_info(&hdev->pdev->dev, "mac address state\n"); 1721 1722 spin_lock_bh(&vport->mac_list_lock); 1723 1724 list_for_each_entry_safe(mac_node, tmp, list, node) { 1725 dev_info(&hdev->pdev->dev, "%pM %d\n", 1726 mac_node->mac_addr, mac_node->state); 1727 } 1728 1729 spin_unlock_bh(&vport->mac_list_lock); 1730 1731 return 0; 1732 } 1733 1734 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) 1735 { 1736 #define DUMP_REG "dump reg" 1737 #define DUMP_TM_MAP "dump tm map" 1738 #define DUMP_LOOPBACK "dump loopback" 1739 #define DUMP_INTERRUPT "dump intr" 1740 1741 struct hclge_vport *vport = hclge_get_vport(handle); 1742 struct hclge_dev *hdev = vport->back; 1743 1744 if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) { 1745 hclge_dbg_fd_tcam(hdev); 1746 } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { 1747 hclge_dbg_dump_tc(hdev); 1748 } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) { 1749 hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]); 1750 } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { 1751 hclge_dbg_dump_tm(hdev); 1752 } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) { 1753 hclge_dbg_dump_qos_pause_cfg(hdev); 1754 } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) { 1755 hclge_dbg_dump_qos_pri_map(hdev); 1756 } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) { 1757 hclge_dbg_dump_qos_buf_cfg(hdev); 1758 } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { 1759 hclge_dbg_dump_mng_table(hdev); 1760 } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) { 1761 hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]); 1762 } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) { 1763 hclge_dbg_dump_rst_info(hdev); 1764 } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) { 1765 hclge_dbg_dump_serv_info(hdev); 1766 } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) { 1767 hclge_dbg_get_m7_stats_info(hdev); 1768 } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) { 1769 hclge_dbg_dump_ncl_config(hdev, 1770 &cmd_buf[sizeof("dump ncl_config")]); 1771 } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) { 1772 hclge_dbg_dump_mac_tnl_status(hdev); 1773 } else if (strncmp(cmd_buf, DUMP_LOOPBACK, 1774 strlen(DUMP_LOOPBACK)) == 0) { 1775 hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]); 1776 } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) { 1777 hclge_dbg_dump_qs_shaper(hdev, 1778 &cmd_buf[sizeof("dump qs shaper")]); 1779 } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) { 1780 hclge_dbg_dump_mac_list(hdev, 1781 &cmd_buf[sizeof("dump uc mac list")], 1782 true); 1783 } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) { 1784 hclge_dbg_dump_mac_list(hdev, 1785 &cmd_buf[sizeof("dump mc mac list")], 1786 false); 1787 } else if (strncmp(cmd_buf, DUMP_INTERRUPT, 1788 strlen(DUMP_INTERRUPT)) == 0) { 1789 hclge_dbg_dump_interrupt(hdev); 1790 } else { 1791 dev_info(&hdev->pdev->dev, "unknown command\n"); 1792 return -EINVAL; 1793 } 1794 1795 return 0; 1796 } 1797 1798 int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf, 1799 char *buf, int len) 1800 { 1801 struct hclge_vport *vport = hclge_get_vport(handle); 1802 struct hclge_dev *hdev = vport->back; 1803 1804 if (strncmp(cmd_buf, HNAE3_DBG_TM_NODES, 1805 strlen(HNAE3_DBG_TM_NODES)) == 0) 1806 return hclge_dbg_dump_tm_nodes(hdev, buf, len); 1807 else if (strncmp(cmd_buf, HNAE3_DBG_TM_PRI, 1808 strlen(HNAE3_DBG_TM_PRI)) == 0) 1809 return hclge_dbg_dump_tm_pri(hdev, buf, len); 1810 else if (strncmp(cmd_buf, HNAE3_DBG_TM_QSET, 1811 strlen(HNAE3_DBG_TM_QSET)) == 0) 1812 return hclge_dbg_dump_tm_qset(hdev, buf, len); 1813 1814 return -EINVAL; 1815 } 1816