1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Copyright (c) 2018-2019 Hisilicon Limited. */ 3 4 #include <linux/device.h> 5 6 #include "hclge_debugfs.h" 7 #include "hclge_err.h" 8 #include "hclge_main.h" 9 #include "hclge_tm.h" 10 #include "hnae3.h" 11 12 static const char * const state_str[] = { "off", "on" }; 13 static const char * const hclge_mac_state_str[] = { 14 "TO_ADD", "TO_DEL", "ACTIVE" 15 }; 16 17 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { 18 { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, 19 .dfx_msg = &hclge_dbg_bios_common_reg[0], 20 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg), 21 .offset = HCLGE_DBG_DFX_BIOS_OFFSET, 22 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } }, 23 { .cmd = HNAE3_DBG_CMD_REG_SSU, 24 .dfx_msg = &hclge_dbg_ssu_reg_0[0], 25 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0), 26 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET, 27 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } }, 28 { .cmd = HNAE3_DBG_CMD_REG_SSU, 29 .dfx_msg = &hclge_dbg_ssu_reg_1[0], 30 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1), 31 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET, 32 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } }, 33 { .cmd = HNAE3_DBG_CMD_REG_SSU, 34 .dfx_msg = &hclge_dbg_ssu_reg_2[0], 35 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2), 36 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET, 37 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } }, 38 { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, 39 .dfx_msg = &hclge_dbg_igu_egu_reg[0], 40 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg), 41 .offset = HCLGE_DBG_DFX_IGU_OFFSET, 42 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } }, 43 { .cmd = HNAE3_DBG_CMD_REG_RPU, 44 .dfx_msg = &hclge_dbg_rpu_reg_0[0], 45 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0), 46 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET, 47 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } }, 48 { .cmd = HNAE3_DBG_CMD_REG_RPU, 49 .dfx_msg = &hclge_dbg_rpu_reg_1[0], 50 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1), 51 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET, 52 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } }, 53 { .cmd = HNAE3_DBG_CMD_REG_NCSI, 54 .dfx_msg = &hclge_dbg_ncsi_reg[0], 55 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg), 56 .offset = HCLGE_DBG_DFX_NCSI_OFFSET, 57 .cmd = HCLGE_OPC_DFX_NCSI_REG } }, 58 { .cmd = HNAE3_DBG_CMD_REG_RTC, 59 .dfx_msg = &hclge_dbg_rtc_reg[0], 60 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg), 61 .offset = HCLGE_DBG_DFX_RTC_OFFSET, 62 .cmd = HCLGE_OPC_DFX_RTC_REG } }, 63 { .cmd = HNAE3_DBG_CMD_REG_PPP, 64 .dfx_msg = &hclge_dbg_ppp_reg[0], 65 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg), 66 .offset = HCLGE_DBG_DFX_PPP_OFFSET, 67 .cmd = HCLGE_OPC_DFX_PPP_REG } }, 68 { .cmd = HNAE3_DBG_CMD_REG_RCB, 69 .dfx_msg = &hclge_dbg_rcb_reg[0], 70 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg), 71 .offset = HCLGE_DBG_DFX_RCB_OFFSET, 72 .cmd = HCLGE_OPC_DFX_RCB_REG } }, 73 { .cmd = HNAE3_DBG_CMD_REG_TQP, 74 .dfx_msg = &hclge_dbg_tqp_reg[0], 75 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg), 76 .offset = HCLGE_DBG_DFX_TQP_OFFSET, 77 .cmd = HCLGE_OPC_DFX_TQP_REG } }, 78 }; 79 80 static void hclge_dbg_fill_content(char *content, u16 len, 81 const struct hclge_dbg_item *items, 82 const char **result, u16 size) 83 { 84 char *pos = content; 85 u16 i; 86 87 memset(content, ' ', len); 88 for (i = 0; i < size; i++) { 89 if (result) 90 strncpy(pos, result[i], strlen(result[i])); 91 else 92 strncpy(pos, items[i].name, strlen(items[i].name)); 93 pos += strlen(items[i].name) + items[i].interval; 94 } 95 *pos++ = '\n'; 96 *pos++ = '\0'; 97 } 98 99 static char *hclge_dbg_get_func_id_str(char *buf, u8 id) 100 { 101 if (id) 102 sprintf(buf, "vf%u", id - 1); 103 else 104 sprintf(buf, "pf"); 105 106 return buf; 107 } 108 109 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset, 110 u32 *bd_num) 111 { 112 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; 113 int entries_per_desc; 114 int index; 115 int ret; 116 117 ret = hclge_query_bd_num_cmd_send(hdev, desc); 118 if (ret) { 119 dev_err(&hdev->pdev->dev, 120 "failed to get dfx bd_num, offset = %d, ret = %d\n", 121 offset, ret); 122 return ret; 123 } 124 125 entries_per_desc = ARRAY_SIZE(desc[0].data); 126 index = offset % entries_per_desc; 127 128 *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]); 129 if (!(*bd_num)) { 130 dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n"); 131 return -EINVAL; 132 } 133 134 return 0; 135 } 136 137 static int hclge_dbg_cmd_send(struct hclge_dev *hdev, 138 struct hclge_desc *desc_src, 139 int index, int bd_num, 140 enum hclge_opcode_type cmd) 141 { 142 struct hclge_desc *desc = desc_src; 143 int ret, i; 144 145 hclge_cmd_setup_basic_desc(desc, cmd, true); 146 desc->data[0] = cpu_to_le32(index); 147 148 for (i = 1; i < bd_num; i++) { 149 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 150 desc++; 151 hclge_cmd_setup_basic_desc(desc, cmd, true); 152 } 153 154 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); 155 if (ret) 156 dev_err(&hdev->pdev->dev, 157 "cmd(0x%x) send fail, ret = %d\n", cmd, ret); 158 return ret; 159 } 160 161 static int 162 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev, 163 const struct hclge_dbg_reg_type_info *reg_info, 164 char *buf, int len, int *pos) 165 { 166 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; 167 const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; 168 struct hclge_desc *desc_src; 169 u32 index, entry, i, cnt; 170 int bd_num, min_num, ret; 171 struct hclge_desc *desc; 172 173 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num); 174 if (ret) 175 return ret; 176 177 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); 178 if (!desc_src) 179 return -ENOMEM; 180 181 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num); 182 183 for (i = 0, cnt = 0; i < min_num; i++, dfx_message++) 184 *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n", 185 cnt++, dfx_message->message); 186 187 for (i = 0; i < cnt; i++) 188 *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i); 189 190 *pos += scnprintf(buf + *pos, len - *pos, "\n"); 191 192 for (index = 0; index < hdev->vport[0].alloc_tqps; index++) { 193 dfx_message = reg_info->dfx_msg; 194 desc = desc_src; 195 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, 196 reg_msg->cmd); 197 if (ret) 198 break; 199 200 for (i = 0; i < min_num; i++, dfx_message++) { 201 entry = i % HCLGE_DESC_DATA_LEN; 202 if (i > 0 && !entry) 203 desc++; 204 205 *pos += scnprintf(buf + *pos, len - *pos, "%#x\t", 206 le32_to_cpu(desc->data[entry])); 207 } 208 *pos += scnprintf(buf + *pos, len - *pos, "\n"); 209 } 210 211 kfree(desc_src); 212 return ret; 213 } 214 215 static int 216 hclge_dbg_dump_reg_common(struct hclge_dev *hdev, 217 const struct hclge_dbg_reg_type_info *reg_info, 218 char *buf, int len, int *pos) 219 { 220 const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; 221 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; 222 struct hclge_desc *desc_src; 223 int bd_num, min_num, ret; 224 struct hclge_desc *desc; 225 u32 entry, i; 226 227 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num); 228 if (ret) 229 return ret; 230 231 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); 232 if (!desc_src) 233 return -ENOMEM; 234 235 desc = desc_src; 236 237 ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd); 238 if (ret) { 239 kfree(desc); 240 return ret; 241 } 242 243 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num); 244 245 for (i = 0; i < min_num; i++, dfx_message++) { 246 entry = i % HCLGE_DESC_DATA_LEN; 247 if (i > 0 && !entry) 248 desc++; 249 if (!dfx_message->flag) 250 continue; 251 252 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n", 253 dfx_message->message, 254 le32_to_cpu(desc->data[entry])); 255 } 256 257 kfree(desc_src); 258 return 0; 259 } 260 261 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf, 262 int len, int *pos) 263 { 264 struct hclge_config_mac_mode_cmd *req; 265 struct hclge_desc desc; 266 u32 loop_en; 267 int ret; 268 269 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 270 271 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 272 if (ret) { 273 dev_err(&hdev->pdev->dev, 274 "failed to dump mac enable status, ret = %d\n", ret); 275 return ret; 276 } 277 278 req = (struct hclge_config_mac_mode_cmd *)desc.data; 279 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 280 281 *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n", 282 hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B)); 283 *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n", 284 hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B)); 285 *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n", 286 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B)); 287 *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n", 288 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B)); 289 *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n", 290 hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B)); 291 *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n", 292 hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B)); 293 *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n", 294 hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B)); 295 *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n", 296 hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B)); 297 *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n", 298 hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B)); 299 *pos += scnprintf(buf + *pos, len - *pos, 300 "mac_rx_oversize_truncate_en: %#x\n", 301 hnae3_get_bit(loop_en, 302 HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B)); 303 *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n", 304 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B)); 305 *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n", 306 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B)); 307 *pos += scnprintf(buf + *pos, len - *pos, 308 "mac_tx_under_min_err_en: %#x\n", 309 hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B)); 310 *pos += scnprintf(buf + *pos, len - *pos, 311 "mac_tx_oversize_truncate_en: %#x\n", 312 hnae3_get_bit(loop_en, 313 HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B)); 314 315 return 0; 316 } 317 318 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf, 319 int len, int *pos) 320 { 321 struct hclge_config_max_frm_size_cmd *req; 322 struct hclge_desc desc; 323 int ret; 324 325 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true); 326 327 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 328 if (ret) { 329 dev_err(&hdev->pdev->dev, 330 "failed to dump mac frame size, ret = %d\n", ret); 331 return ret; 332 } 333 334 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 335 336 *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n", 337 le16_to_cpu(req->max_frm_size)); 338 *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n", 339 req->min_frm_size); 340 341 return 0; 342 } 343 344 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf, 345 int len, int *pos) 346 { 347 #define HCLGE_MAC_SPEED_SHIFT 0 348 #define HCLGE_MAC_SPEED_MASK GENMASK(5, 0) 349 #define HCLGE_MAC_DUPLEX_SHIFT 7 350 351 struct hclge_config_mac_speed_dup_cmd *req; 352 struct hclge_desc desc; 353 int ret; 354 355 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true); 356 357 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 358 if (ret) { 359 dev_err(&hdev->pdev->dev, 360 "failed to dump mac speed duplex, ret = %d\n", ret); 361 return ret; 362 } 363 364 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 365 366 *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n", 367 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK, 368 HCLGE_MAC_SPEED_SHIFT)); 369 *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n", 370 hnae3_get_bit(req->speed_dup, 371 HCLGE_MAC_DUPLEX_SHIFT)); 372 return 0; 373 } 374 375 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len) 376 { 377 int pos = 0; 378 int ret; 379 380 ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos); 381 if (ret) 382 return ret; 383 384 ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos); 385 if (ret) 386 return ret; 387 388 return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos); 389 } 390 391 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len, 392 int *pos) 393 { 394 struct hclge_dbg_bitmap_cmd *bitmap; 395 struct hclge_desc desc; 396 u16 qset_id, qset_num; 397 int ret; 398 399 ret = hclge_tm_get_qset_num(hdev, &qset_num); 400 if (ret) 401 return ret; 402 403 *pos += scnprintf(buf + *pos, len - *pos, 404 "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n"); 405 for (qset_id = 0; qset_id < qset_num; qset_id++) { 406 ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1, 407 HCLGE_OPC_QSET_DFX_STS); 408 if (ret) 409 return ret; 410 411 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; 412 413 *pos += scnprintf(buf + *pos, len - *pos, 414 "%04u %#x %#x %#x %#x\n", 415 qset_id, bitmap->bit0, bitmap->bit1, 416 bitmap->bit2, bitmap->bit3); 417 } 418 419 return 0; 420 } 421 422 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len, 423 int *pos) 424 { 425 struct hclge_dbg_bitmap_cmd *bitmap; 426 struct hclge_desc desc; 427 u8 pri_id, pri_num; 428 int ret; 429 430 ret = hclge_tm_get_pri_num(hdev, &pri_num); 431 if (ret) 432 return ret; 433 434 *pos += scnprintf(buf + *pos, len - *pos, 435 "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n"); 436 for (pri_id = 0; pri_id < pri_num; pri_id++) { 437 ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1, 438 HCLGE_OPC_PRI_DFX_STS); 439 if (ret) 440 return ret; 441 442 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; 443 444 *pos += scnprintf(buf + *pos, len - *pos, 445 "%03u %#x %#x %#x\n", 446 pri_id, bitmap->bit0, bitmap->bit1, 447 bitmap->bit2); 448 } 449 450 return 0; 451 } 452 453 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len, 454 int *pos) 455 { 456 struct hclge_dbg_bitmap_cmd *bitmap; 457 struct hclge_desc desc; 458 u8 pg_id; 459 int ret; 460 461 *pos += scnprintf(buf + *pos, len - *pos, 462 "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n"); 463 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) { 464 ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1, 465 HCLGE_OPC_PG_DFX_STS); 466 if (ret) 467 return ret; 468 469 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; 470 471 *pos += scnprintf(buf + *pos, len - *pos, 472 "%03u %#x %#x %#x\n", 473 pg_id, bitmap->bit0, bitmap->bit1, 474 bitmap->bit2); 475 } 476 477 return 0; 478 } 479 480 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len, 481 int *pos) 482 { 483 struct hclge_desc desc; 484 u16 nq_id; 485 int ret; 486 487 *pos += scnprintf(buf + *pos, len - *pos, 488 "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n"); 489 for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) { 490 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1, 491 HCLGE_OPC_SCH_NQ_CNT); 492 if (ret) 493 return ret; 494 495 *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x", 496 nq_id, le32_to_cpu(desc.data[1])); 497 498 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1, 499 HCLGE_OPC_SCH_RQ_CNT); 500 if (ret) 501 return ret; 502 503 *pos += scnprintf(buf + *pos, len - *pos, 504 " %#x\n", 505 le32_to_cpu(desc.data[1])); 506 } 507 508 return 0; 509 } 510 511 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len, 512 int *pos) 513 { 514 struct hclge_dbg_bitmap_cmd *bitmap; 515 struct hclge_desc desc; 516 u8 port_id = 0; 517 int ret; 518 519 ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1, 520 HCLGE_OPC_PORT_DFX_STS); 521 if (ret) 522 return ret; 523 524 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; 525 526 *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n", 527 bitmap->bit0); 528 *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n", 529 bitmap->bit1); 530 531 return 0; 532 } 533 534 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len, 535 int *pos) 536 { 537 struct hclge_desc desc[2]; 538 u8 port_id = 0; 539 int ret; 540 541 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 542 HCLGE_OPC_TM_INTERNAL_CNT); 543 if (ret) 544 return ret; 545 546 *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n", 547 le32_to_cpu(desc[0].data[1])); 548 *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n", 549 le32_to_cpu(desc[0].data[2])); 550 551 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2, 552 HCLGE_OPC_TM_INTERNAL_STS); 553 if (ret) 554 return ret; 555 556 *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n", 557 le32_to_cpu(desc[0].data[1])); 558 *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n", 559 le32_to_cpu(desc[0].data[2])); 560 *pos += scnprintf(buf + *pos, len - *pos, 561 "sch_roce_fifo_afull_gap: %#x\n", 562 le32_to_cpu(desc[0].data[3])); 563 *pos += scnprintf(buf + *pos, len - *pos, 564 "tx_private_waterline: %#x\n", 565 le32_to_cpu(desc[0].data[4])); 566 *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n", 567 le32_to_cpu(desc[0].data[5])); 568 *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n", 569 le32_to_cpu(desc[1].data[0])); 570 *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n", 571 le32_to_cpu(desc[1].data[1])); 572 573 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) 574 return 0; 575 576 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 577 HCLGE_OPC_TM_INTERNAL_STS_1); 578 if (ret) 579 return ret; 580 581 *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n", 582 le32_to_cpu(desc[0].data[1])); 583 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n", 584 le32_to_cpu(desc[0].data[2])); 585 *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n", 586 le32_to_cpu(desc[0].data[3])); 587 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n", 588 le32_to_cpu(desc[0].data[4])); 589 *pos += scnprintf(buf + *pos, len - *pos, 590 "IGU_TX_PRI_MAP_TC_CFG: %#x\n", 591 le32_to_cpu(desc[0].data[5])); 592 593 return 0; 594 } 595 596 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len) 597 { 598 int pos = 0; 599 int ret; 600 601 ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos); 602 if (ret) 603 return ret; 604 605 ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos); 606 if (ret) 607 return ret; 608 609 ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos); 610 if (ret) 611 return ret; 612 613 ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos); 614 if (ret) 615 return ret; 616 617 ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos); 618 if (ret) 619 return ret; 620 621 return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos); 622 } 623 624 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, 625 enum hnae3_dbg_cmd cmd, char *buf, int len) 626 { 627 const struct hclge_dbg_reg_type_info *reg_info; 628 int pos = 0, ret = 0; 629 int i; 630 631 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { 632 reg_info = &hclge_dbg_reg_info[i]; 633 if (cmd == reg_info->cmd) { 634 if (cmd == HNAE3_DBG_CMD_REG_TQP) 635 return hclge_dbg_dump_reg_tqp(hdev, reg_info, 636 buf, len, &pos); 637 638 ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf, 639 len, &pos); 640 if (ret) 641 break; 642 } 643 } 644 645 return ret; 646 } 647 648 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len) 649 { 650 struct hclge_ets_tc_weight_cmd *ets_weight; 651 struct hclge_desc desc; 652 char *sch_mode_str; 653 int pos = 0; 654 int ret; 655 u8 i; 656 657 if (!hnae3_dev_dcb_supported(hdev)) { 658 dev_err(&hdev->pdev->dev, 659 "Only DCB-supported dev supports tc\n"); 660 return -EOPNOTSUPP; 661 } 662 663 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); 664 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 665 if (ret) { 666 dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n", 667 ret); 668 return ret; 669 } 670 671 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; 672 673 pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n", 674 hdev->tm_info.num_tc); 675 pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n", 676 ets_weight->weight_offset); 677 678 pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n"); 679 for (i = 0; i < HNAE3_MAX_TC; i++) { 680 sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp"; 681 pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n", 682 i, sch_mode_str, 683 hdev->tm_info.pg_info[0].tc_dwrr[i]); 684 } 685 686 return 0; 687 } 688 689 static const struct hclge_dbg_item tm_pg_items[] = { 690 { "ID", 2 }, 691 { "PRI_MAP", 2 }, 692 { "MODE", 2 }, 693 { "DWRR", 2 }, 694 { "C_IR_B", 2 }, 695 { "C_IR_U", 2 }, 696 { "C_IR_S", 2 }, 697 { "C_BS_B", 2 }, 698 { "C_BS_S", 2 }, 699 { "C_FLAG", 2 }, 700 { "C_RATE(Mbps)", 2 }, 701 { "P_IR_B", 2 }, 702 { "P_IR_U", 2 }, 703 { "P_IR_S", 2 }, 704 { "P_BS_B", 2 }, 705 { "P_BS_S", 2 }, 706 { "P_FLAG", 2 }, 707 { "P_RATE(Mbps)", 0 } 708 }; 709 710 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para, 711 char **result, u8 *index) 712 { 713 sprintf(result[(*index)++], "%3u", para->ir_b); 714 sprintf(result[(*index)++], "%3u", para->ir_u); 715 sprintf(result[(*index)++], "%3u", para->ir_s); 716 sprintf(result[(*index)++], "%3u", para->bs_b); 717 sprintf(result[(*index)++], "%3u", para->bs_s); 718 sprintf(result[(*index)++], "%3u", para->flag); 719 sprintf(result[(*index)++], "%6u", para->rate); 720 } 721 722 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len) 723 { 724 char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN]; 725 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; 726 char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str; 727 u8 pg_id, sch_mode, weight, pri_bit_map, i, j; 728 char content[HCLGE_DBG_TM_INFO_LEN]; 729 int pos = 0; 730 int ret; 731 732 for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) 733 result[i] = &data_str[i][0]; 734 735 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, 736 NULL, ARRAY_SIZE(tm_pg_items)); 737 pos += scnprintf(buf + pos, len - pos, "%s", content); 738 739 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) { 740 ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map); 741 if (ret) 742 return ret; 743 744 ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode); 745 if (ret) 746 return ret; 747 748 ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight); 749 if (ret) 750 return ret; 751 752 ret = hclge_tm_get_pg_shaper(hdev, pg_id, 753 HCLGE_OPC_TM_PG_C_SHAPPING, 754 &c_shaper_para); 755 if (ret) 756 return ret; 757 758 ret = hclge_tm_get_pg_shaper(hdev, pg_id, 759 HCLGE_OPC_TM_PG_P_SHAPPING, 760 &p_shaper_para); 761 if (ret) 762 return ret; 763 764 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : 765 "sp"; 766 767 j = 0; 768 sprintf(result[j++], "%02u", pg_id); 769 sprintf(result[j++], "0x%02x", pri_bit_map); 770 sprintf(result[j++], "%4s", sch_mode_str); 771 sprintf(result[j++], "%3u", weight); 772 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); 773 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); 774 775 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, 776 (const char **)result, 777 ARRAY_SIZE(tm_pg_items)); 778 pos += scnprintf(buf + pos, len - pos, "%s", content); 779 } 780 781 return 0; 782 } 783 784 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len) 785 { 786 struct hclge_tm_shaper_para shaper_para; 787 int pos = 0; 788 int ret; 789 790 ret = hclge_tm_get_port_shaper(hdev, &shaper_para); 791 if (ret) 792 return ret; 793 794 pos += scnprintf(buf + pos, len - pos, 795 "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n"); 796 pos += scnprintf(buf + pos, len - pos, 797 "%3u %3u %3u %3u %3u %1u %6u\n", 798 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s, 799 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag, 800 shaper_para.rate); 801 802 return 0; 803 } 804 805 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id, 806 char *buf, int len) 807 { 808 u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM]; 809 struct hclge_bp_to_qs_map_cmd *map; 810 struct hclge_desc desc; 811 int pos = 0; 812 u8 group_id; 813 u8 grp_num; 814 u16 i = 0; 815 int ret; 816 817 grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ? 818 HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM; 819 map = (struct hclge_bp_to_qs_map_cmd *)desc.data; 820 for (group_id = 0; group_id < grp_num; group_id++) { 821 hclge_cmd_setup_basic_desc(&desc, 822 HCLGE_OPC_TM_BP_TO_QSET_MAPPING, 823 true); 824 map->tc_id = tc_id; 825 map->qs_group_id = group_id; 826 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 827 if (ret) { 828 dev_err(&hdev->pdev->dev, 829 "failed to get bp to qset map, ret = %d\n", 830 ret); 831 return ret; 832 } 833 834 qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map); 835 } 836 837 pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n"); 838 for (group_id = 0; group_id < grp_num / 8; group_id++) { 839 pos += scnprintf(buf + pos, len - pos, 840 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", 841 group_id * 256, qset_mapping[i + 7], 842 qset_mapping[i + 6], qset_mapping[i + 5], 843 qset_mapping[i + 4], qset_mapping[i + 3], 844 qset_mapping[i + 2], qset_mapping[i + 1], 845 qset_mapping[i]); 846 i += 8; 847 } 848 849 return pos; 850 } 851 852 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len) 853 { 854 u16 queue_id; 855 u16 qset_id; 856 u8 link_vld; 857 int pos = 0; 858 u8 pri_id; 859 u8 tc_id; 860 int ret; 861 862 for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) { 863 ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id); 864 if (ret) 865 return ret; 866 867 ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id, 868 &link_vld); 869 if (ret) 870 return ret; 871 872 ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id); 873 if (ret) 874 return ret; 875 876 pos += scnprintf(buf + pos, len - pos, 877 "QUEUE_ID QSET_ID PRI_ID TC_ID\n"); 878 pos += scnprintf(buf + pos, len - pos, 879 "%04u %4u %3u %2u\n", 880 queue_id, qset_id, pri_id, tc_id); 881 882 if (!hnae3_dev_dcb_supported(hdev)) 883 continue; 884 885 ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos, 886 len - pos); 887 if (ret < 0) 888 return ret; 889 pos += ret; 890 891 pos += scnprintf(buf + pos, len - pos, "\n"); 892 } 893 894 return 0; 895 } 896 897 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len) 898 { 899 struct hclge_tm_nodes_cmd *nodes; 900 struct hclge_desc desc; 901 int pos = 0; 902 int ret; 903 904 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); 905 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 906 if (ret) { 907 dev_err(&hdev->pdev->dev, 908 "failed to dump tm nodes, ret = %d\n", ret); 909 return ret; 910 } 911 912 nodes = (struct hclge_tm_nodes_cmd *)desc.data; 913 914 pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n"); 915 pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n", 916 nodes->pg_base_id, nodes->pg_num); 917 pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n", 918 nodes->pri_base_id, nodes->pri_num); 919 pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n", 920 le16_to_cpu(nodes->qset_base_id), 921 le16_to_cpu(nodes->qset_num)); 922 pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n", 923 le16_to_cpu(nodes->queue_base_id), 924 le16_to_cpu(nodes->queue_num)); 925 926 return 0; 927 } 928 929 static const struct hclge_dbg_item tm_pri_items[] = { 930 { "ID", 4 }, 931 { "MODE", 2 }, 932 { "DWRR", 2 }, 933 { "C_IR_B", 2 }, 934 { "C_IR_U", 2 }, 935 { "C_IR_S", 2 }, 936 { "C_BS_B", 2 }, 937 { "C_BS_S", 2 }, 938 { "C_FLAG", 2 }, 939 { "C_RATE(Mbps)", 2 }, 940 { "P_IR_B", 2 }, 941 { "P_IR_U", 2 }, 942 { "P_IR_S", 2 }, 943 { "P_BS_B", 2 }, 944 { "P_BS_S", 2 }, 945 { "P_FLAG", 2 }, 946 { "P_RATE(Mbps)", 0 } 947 }; 948 949 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) 950 { 951 char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN]; 952 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; 953 char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str; 954 char content[HCLGE_DBG_TM_INFO_LEN]; 955 u8 pri_num, sch_mode, weight, i, j; 956 int pos, ret; 957 958 ret = hclge_tm_get_pri_num(hdev, &pri_num); 959 if (ret) 960 return ret; 961 962 for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++) 963 result[i] = &data_str[i][0]; 964 965 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, 966 NULL, ARRAY_SIZE(tm_pri_items)); 967 pos = scnprintf(buf, len, "%s", content); 968 969 for (i = 0; i < pri_num; i++) { 970 ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode); 971 if (ret) 972 return ret; 973 974 ret = hclge_tm_get_pri_weight(hdev, i, &weight); 975 if (ret) 976 return ret; 977 978 ret = hclge_tm_get_pri_shaper(hdev, i, 979 HCLGE_OPC_TM_PRI_C_SHAPPING, 980 &c_shaper_para); 981 if (ret) 982 return ret; 983 984 ret = hclge_tm_get_pri_shaper(hdev, i, 985 HCLGE_OPC_TM_PRI_P_SHAPPING, 986 &p_shaper_para); 987 if (ret) 988 return ret; 989 990 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : 991 "sp"; 992 993 j = 0; 994 sprintf(result[j++], "%04u", i); 995 sprintf(result[j++], "%4s", sch_mode_str); 996 sprintf(result[j++], "%3u", weight); 997 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); 998 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); 999 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, 1000 (const char **)result, 1001 ARRAY_SIZE(tm_pri_items)); 1002 pos += scnprintf(buf + pos, len - pos, "%s", content); 1003 } 1004 1005 return 0; 1006 } 1007 1008 static const struct hclge_dbg_item tm_qset_items[] = { 1009 { "ID", 4 }, 1010 { "MAP_PRI", 2 }, 1011 { "LINK_VLD", 2 }, 1012 { "MODE", 2 }, 1013 { "DWRR", 2 }, 1014 { "IR_B", 2 }, 1015 { "IR_U", 2 }, 1016 { "IR_S", 2 }, 1017 { "BS_B", 2 }, 1018 { "BS_S", 2 }, 1019 { "FLAG", 2 }, 1020 { "RATE(Mbps)", 0 } 1021 }; 1022 1023 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len) 1024 { 1025 char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN]; 1026 char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str; 1027 u8 priority, link_vld, sch_mode, weight; 1028 struct hclge_tm_shaper_para shaper_para; 1029 char content[HCLGE_DBG_TM_INFO_LEN]; 1030 u16 qset_num, i; 1031 int ret, pos; 1032 u8 j; 1033 1034 ret = hclge_tm_get_qset_num(hdev, &qset_num); 1035 if (ret) 1036 return ret; 1037 1038 for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++) 1039 result[i] = &data_str[i][0]; 1040 1041 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items, 1042 NULL, ARRAY_SIZE(tm_qset_items)); 1043 pos = scnprintf(buf, len, "%s", content); 1044 1045 for (i = 0; i < qset_num; i++) { 1046 ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld); 1047 if (ret) 1048 return ret; 1049 1050 ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode); 1051 if (ret) 1052 return ret; 1053 1054 ret = hclge_tm_get_qset_weight(hdev, i, &weight); 1055 if (ret) 1056 return ret; 1057 1058 ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para); 1059 if (ret) 1060 return ret; 1061 1062 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : 1063 "sp"; 1064 1065 j = 0; 1066 sprintf(result[j++], "%04u", i); 1067 sprintf(result[j++], "%4u", priority); 1068 sprintf(result[j++], "%4u", link_vld); 1069 sprintf(result[j++], "%4s", sch_mode_str); 1070 sprintf(result[j++], "%3u", weight); 1071 hclge_dbg_fill_shaper_content(&shaper_para, result, &j); 1072 1073 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items, 1074 (const char **)result, 1075 ARRAY_SIZE(tm_qset_items)); 1076 pos += scnprintf(buf + pos, len - pos, "%s", content); 1077 } 1078 1079 return 0; 1080 } 1081 1082 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf, 1083 int len) 1084 { 1085 struct hclge_cfg_pause_param_cmd *pause_param; 1086 struct hclge_desc desc; 1087 int pos = 0; 1088 int ret; 1089 1090 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); 1091 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1092 if (ret) { 1093 dev_err(&hdev->pdev->dev, 1094 "failed to dump qos pause, ret = %d\n", ret); 1095 return ret; 1096 } 1097 1098 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; 1099 1100 pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n", 1101 pause_param->pause_trans_gap); 1102 pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n", 1103 le16_to_cpu(pause_param->pause_trans_time)); 1104 return 0; 1105 } 1106 1107 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf, 1108 int len) 1109 { 1110 #define HCLGE_DBG_TC_MASK 0x0F 1111 #define HCLGE_DBG_TC_BIT_WIDTH 4 1112 1113 struct hclge_qos_pri_map_cmd *pri_map; 1114 struct hclge_desc desc; 1115 int pos = 0; 1116 u8 *pri_tc; 1117 u8 tc, i; 1118 int ret; 1119 1120 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); 1121 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1122 if (ret) { 1123 dev_err(&hdev->pdev->dev, 1124 "failed to dump qos pri map, ret = %d\n", ret); 1125 return ret; 1126 } 1127 1128 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; 1129 1130 pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n", 1131 pri_map->vlan_pri); 1132 pos += scnprintf(buf + pos, len - pos, "PRI TC\n"); 1133 1134 pri_tc = (u8 *)pri_map; 1135 for (i = 0; i < HNAE3_MAX_TC; i++) { 1136 tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH); 1137 tc &= HCLGE_DBG_TC_MASK; 1138 pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc); 1139 } 1140 1141 return 0; 1142 } 1143 1144 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len) 1145 { 1146 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; 1147 struct hclge_desc desc; 1148 int pos = 0; 1149 int i, ret; 1150 1151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true); 1152 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1153 if (ret) { 1154 dev_err(&hdev->pdev->dev, 1155 "failed to dump tx buf, ret = %d\n", ret); 1156 return ret; 1157 } 1158 1159 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1160 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1161 pos += scnprintf(buf + pos, len - pos, 1162 "tx_packet_buf_tc_%d: 0x%x\n", i, 1163 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); 1164 1165 return pos; 1166 } 1167 1168 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf, 1169 int len) 1170 { 1171 struct hclge_rx_priv_buff_cmd *rx_buf_cmd; 1172 struct hclge_desc desc; 1173 int pos = 0; 1174 int i, ret; 1175 1176 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true); 1177 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1178 if (ret) { 1179 dev_err(&hdev->pdev->dev, 1180 "failed to dump rx priv buf, ret = %d\n", ret); 1181 return ret; 1182 } 1183 1184 pos += scnprintf(buf + pos, len - pos, "\n"); 1185 1186 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data; 1187 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1188 pos += scnprintf(buf + pos, len - pos, 1189 "rx_packet_buf_tc_%d: 0x%x\n", i, 1190 le16_to_cpu(rx_buf_cmd->buf_num[i])); 1191 1192 pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n", 1193 le16_to_cpu(rx_buf_cmd->shared_buf)); 1194 1195 return pos; 1196 } 1197 1198 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf, 1199 int len) 1200 { 1201 struct hclge_rx_com_wl *rx_com_wl; 1202 struct hclge_desc desc; 1203 int pos = 0; 1204 int ret; 1205 1206 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true); 1207 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1208 if (ret) { 1209 dev_err(&hdev->pdev->dev, 1210 "failed to dump rx common wl, ret = %d\n", ret); 1211 return ret; 1212 } 1213 1214 rx_com_wl = (struct hclge_rx_com_wl *)desc.data; 1215 pos += scnprintf(buf + pos, len - pos, "\n"); 1216 pos += scnprintf(buf + pos, len - pos, 1217 "rx_com_wl: high: 0x%x, low: 0x%x\n", 1218 le16_to_cpu(rx_com_wl->com_wl.high), 1219 le16_to_cpu(rx_com_wl->com_wl.low)); 1220 1221 return pos; 1222 } 1223 1224 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf, 1225 int len) 1226 { 1227 struct hclge_rx_com_wl *rx_packet_cnt; 1228 struct hclge_desc desc; 1229 int pos = 0; 1230 int ret; 1231 1232 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true); 1233 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1234 if (ret) { 1235 dev_err(&hdev->pdev->dev, 1236 "failed to dump rx global pkt cnt, ret = %d\n", ret); 1237 return ret; 1238 } 1239 1240 rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data; 1241 pos += scnprintf(buf + pos, len - pos, 1242 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", 1243 le16_to_cpu(rx_packet_cnt->com_wl.high), 1244 le16_to_cpu(rx_packet_cnt->com_wl.low)); 1245 1246 return pos; 1247 } 1248 1249 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf, 1250 int len) 1251 { 1252 struct hclge_rx_priv_wl_buf *rx_priv_wl; 1253 struct hclge_desc desc[2]; 1254 int pos = 0; 1255 int i, ret; 1256 1257 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); 1258 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1259 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); 1260 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1261 if (ret) { 1262 dev_err(&hdev->pdev->dev, 1263 "failed to dump rx priv wl buf, ret = %d\n", ret); 1264 return ret; 1265 } 1266 1267 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; 1268 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 1269 pos += scnprintf(buf + pos, len - pos, 1270 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, 1271 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 1272 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 1273 1274 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; 1275 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 1276 pos += scnprintf(buf + pos, len - pos, 1277 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", 1278 i + HCLGE_TC_NUM_ONE_DESC, 1279 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 1280 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 1281 1282 return pos; 1283 } 1284 1285 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev, 1286 char *buf, int len) 1287 { 1288 struct hclge_rx_com_thrd *rx_com_thrd; 1289 struct hclge_desc desc[2]; 1290 int pos = 0; 1291 int i, ret; 1292 1293 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true); 1294 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1295 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true); 1296 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1297 if (ret) { 1298 dev_err(&hdev->pdev->dev, 1299 "failed to dump rx common threshold, ret = %d\n", ret); 1300 return ret; 1301 } 1302 1303 pos += scnprintf(buf + pos, len - pos, "\n"); 1304 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; 1305 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 1306 pos += scnprintf(buf + pos, len - pos, 1307 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, 1308 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 1309 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 1310 1311 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; 1312 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 1313 pos += scnprintf(buf + pos, len - pos, 1314 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", 1315 i + HCLGE_TC_NUM_ONE_DESC, 1316 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 1317 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 1318 1319 return pos; 1320 } 1321 1322 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf, 1323 int len) 1324 { 1325 int pos = 0; 1326 int ret; 1327 1328 ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos); 1329 if (ret < 0) 1330 return ret; 1331 pos += ret; 1332 1333 ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos); 1334 if (ret < 0) 1335 return ret; 1336 pos += ret; 1337 1338 ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos); 1339 if (ret < 0) 1340 return ret; 1341 pos += ret; 1342 1343 ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos); 1344 if (ret < 0) 1345 return ret; 1346 pos += ret; 1347 1348 pos += scnprintf(buf + pos, len - pos, "\n"); 1349 if (!hnae3_dev_dcb_supported(hdev)) 1350 return 0; 1351 1352 ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos); 1353 if (ret < 0) 1354 return ret; 1355 pos += ret; 1356 1357 ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos, 1358 len - pos); 1359 if (ret < 0) 1360 return ret; 1361 1362 return 0; 1363 } 1364 1365 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len) 1366 { 1367 struct hclge_mac_ethertype_idx_rd_cmd *req0; 1368 struct hclge_desc desc; 1369 u32 msg_egress_port; 1370 int pos = 0; 1371 int ret, i; 1372 1373 pos += scnprintf(buf + pos, len - pos, 1374 "entry mac_addr mask ether "); 1375 pos += scnprintf(buf + pos, len - pos, 1376 "mask vlan mask i_map i_dir e_type "); 1377 pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n"); 1378 1379 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { 1380 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, 1381 true); 1382 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; 1383 req0->index = cpu_to_le16(i); 1384 1385 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1386 if (ret) { 1387 dev_err(&hdev->pdev->dev, 1388 "failed to dump manage table, ret = %d\n", ret); 1389 return ret; 1390 } 1391 1392 if (!req0->resp_code) 1393 continue; 1394 1395 pos += scnprintf(buf + pos, len - pos, "%02u %pM ", 1396 le16_to_cpu(req0->index), req0->mac_addr); 1397 1398 pos += scnprintf(buf + pos, len - pos, 1399 "%x %04x %x %04x ", 1400 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), 1401 le16_to_cpu(req0->ethter_type), 1402 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), 1403 le16_to_cpu(req0->vlan_tag) & 1404 HCLGE_DBG_MNG_VLAN_TAG); 1405 1406 pos += scnprintf(buf + pos, len - pos, 1407 "%x %02x %02x ", 1408 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), 1409 req0->i_port_bitmap, req0->i_port_direction); 1410 1411 msg_egress_port = le16_to_cpu(req0->egress_port); 1412 pos += scnprintf(buf + pos, len - pos, 1413 "%x %x %02x %04x %x\n", 1414 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B), 1415 msg_egress_port & HCLGE_DBG_MNG_PF_ID, 1416 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, 1417 le16_to_cpu(req0->egress_queue), 1418 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B)); 1419 } 1420 1421 return 0; 1422 } 1423 1424 #define HCLGE_DBG_TCAM_BUF_SIZE 256 1425 1426 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, 1427 char *tcam_buf, 1428 struct hclge_dbg_tcam_msg tcam_msg) 1429 { 1430 struct hclge_fd_tcam_config_1_cmd *req1; 1431 struct hclge_fd_tcam_config_2_cmd *req2; 1432 struct hclge_fd_tcam_config_3_cmd *req3; 1433 struct hclge_desc desc[3]; 1434 int pos = 0; 1435 int ret, i; 1436 u32 *req; 1437 1438 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); 1439 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1440 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); 1441 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1442 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); 1443 1444 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 1445 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 1446 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 1447 1448 req1->stage = tcam_msg.stage; 1449 req1->xy_sel = sel_x ? 1 : 0; 1450 req1->index = cpu_to_le32(tcam_msg.loc); 1451 1452 ret = hclge_cmd_send(&hdev->hw, desc, 3); 1453 if (ret) 1454 return ret; 1455 1456 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, 1457 "read result tcam key %s(%u):\n", sel_x ? "x" : "y", 1458 tcam_msg.loc); 1459 1460 /* tcam_data0 ~ tcam_data1 */ 1461 req = (u32 *)req1->tcam_data; 1462 for (i = 0; i < 2; i++) 1463 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, 1464 "%08x\n", *req++); 1465 1466 /* tcam_data2 ~ tcam_data7 */ 1467 req = (u32 *)req2->tcam_data; 1468 for (i = 0; i < 6; i++) 1469 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, 1470 "%08x\n", *req++); 1471 1472 /* tcam_data8 ~ tcam_data12 */ 1473 req = (u32 *)req3->tcam_data; 1474 for (i = 0; i < 5; i++) 1475 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, 1476 "%08x\n", *req++); 1477 1478 return ret; 1479 } 1480 1481 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) 1482 { 1483 struct hclge_fd_rule *rule; 1484 struct hlist_node *node; 1485 int cnt = 0; 1486 1487 spin_lock_bh(&hdev->fd_rule_lock); 1488 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 1489 rule_locs[cnt] = rule->location; 1490 cnt++; 1491 } 1492 spin_unlock_bh(&hdev->fd_rule_lock); 1493 1494 if (cnt != hdev->hclge_fd_rule_num || cnt == 0) 1495 return -EINVAL; 1496 1497 return cnt; 1498 } 1499 1500 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len) 1501 { 1502 u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 1503 struct hclge_dbg_tcam_msg tcam_msg; 1504 int i, ret, rule_cnt; 1505 u16 *rule_locs; 1506 char *tcam_buf; 1507 int pos = 0; 1508 1509 if (!hnae3_dev_fd_supported(hdev)) { 1510 dev_err(&hdev->pdev->dev, 1511 "Only FD-supported dev supports dump fd tcam\n"); 1512 return -EOPNOTSUPP; 1513 } 1514 1515 if (!hdev->hclge_fd_rule_num || !rule_num) 1516 return 0; 1517 1518 rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL); 1519 if (!rule_locs) 1520 return -ENOMEM; 1521 1522 tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL); 1523 if (!tcam_buf) { 1524 kfree(rule_locs); 1525 return -ENOMEM; 1526 } 1527 1528 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs); 1529 if (rule_cnt < 0) { 1530 ret = rule_cnt; 1531 dev_err(&hdev->pdev->dev, 1532 "failed to get rule number, ret = %d\n", ret); 1533 goto out; 1534 } 1535 1536 ret = 0; 1537 for (i = 0; i < rule_cnt; i++) { 1538 tcam_msg.stage = HCLGE_FD_STAGE_1; 1539 tcam_msg.loc = rule_locs[i]; 1540 1541 ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg); 1542 if (ret) { 1543 dev_err(&hdev->pdev->dev, 1544 "failed to get fd tcam key x, ret = %d\n", ret); 1545 goto out; 1546 } 1547 1548 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); 1549 1550 ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg); 1551 if (ret) { 1552 dev_err(&hdev->pdev->dev, 1553 "failed to get fd tcam key y, ret = %d\n", ret); 1554 goto out; 1555 } 1556 1557 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); 1558 } 1559 1560 out: 1561 kfree(tcam_buf); 1562 kfree(rule_locs); 1563 return ret; 1564 } 1565 1566 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len) 1567 { 1568 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ 1569 struct hclge_fd_ad_cnt_read_cmd *req; 1570 char str_id[HCLGE_DBG_ID_LEN]; 1571 struct hclge_desc desc; 1572 int pos = 0; 1573 int ret; 1574 u64 cnt; 1575 u8 i; 1576 1577 pos += scnprintf(buf + pos, len - pos, 1578 "func_id\thit_times\n"); 1579 1580 for (i = 0; i < func_num; i++) { 1581 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true); 1582 req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data; 1583 req->index = cpu_to_le16(i); 1584 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1585 if (ret) { 1586 dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n", 1587 ret); 1588 return ret; 1589 } 1590 cnt = le64_to_cpu(req->cnt); 1591 hclge_dbg_get_func_id_str(str_id, i); 1592 pos += scnprintf(buf + pos, len - pos, 1593 "%s\t%llu\n", str_id, cnt); 1594 } 1595 1596 return 0; 1597 } 1598 1599 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len) 1600 { 1601 int pos = 0; 1602 1603 pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n", 1604 hdev->rst_stats.pf_rst_cnt); 1605 pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n", 1606 hdev->rst_stats.flr_rst_cnt); 1607 pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n", 1608 hdev->rst_stats.global_rst_cnt); 1609 pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n", 1610 hdev->rst_stats.imp_rst_cnt); 1611 pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n", 1612 hdev->rst_stats.reset_done_cnt); 1613 pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n", 1614 hdev->rst_stats.hw_reset_done_cnt); 1615 pos += scnprintf(buf + pos, len - pos, "reset count: %u\n", 1616 hdev->rst_stats.reset_cnt); 1617 pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n", 1618 hdev->rst_stats.reset_fail_cnt); 1619 pos += scnprintf(buf + pos, len - pos, 1620 "vector0 interrupt enable status: 0x%x\n", 1621 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); 1622 pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n", 1623 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); 1624 pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n", 1625 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); 1626 pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n", 1627 hclge_read_dev(&hdev->hw, 1628 HCLGE_RAS_PF_OTHER_INT_STS_REG)); 1629 pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n", 1630 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 1631 pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n", 1632 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); 1633 pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n", 1634 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); 1635 pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n", 1636 hdev->state); 1637 1638 return 0; 1639 } 1640 1641 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len) 1642 { 1643 unsigned long rem_nsec; 1644 int pos = 0; 1645 u64 lc; 1646 1647 lc = local_clock(); 1648 rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS); 1649 1650 pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n", 1651 (unsigned long)lc, rem_nsec / 1000); 1652 pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n", 1653 jiffies_to_msecs(jiffies - hdev->last_serv_processed)); 1654 pos += scnprintf(buf + pos, len - pos, 1655 "last_service_task_processed: %lu(jiffies)\n", 1656 hdev->last_serv_processed); 1657 pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n", 1658 hdev->serv_processed_cnt); 1659 1660 return 0; 1661 } 1662 1663 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len) 1664 { 1665 int pos = 0; 1666 1667 pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n", 1668 hdev->num_nic_msi); 1669 pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n", 1670 hdev->num_roce_msi); 1671 pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n", 1672 hdev->num_msi_used); 1673 pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n", 1674 hdev->num_msi_left); 1675 1676 return 0; 1677 } 1678 1679 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src, 1680 char *buf, int len, u32 bd_num) 1681 { 1682 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2 1683 1684 struct hclge_desc *desc_index = desc_src; 1685 u32 offset = 0; 1686 int pos = 0; 1687 u32 i, j; 1688 1689 pos += scnprintf(buf + pos, len - pos, "offset | data\n"); 1690 1691 for (i = 0; i < bd_num; i++) { 1692 j = 0; 1693 while (j < HCLGE_DESC_DATA_LEN - 1) { 1694 pos += scnprintf(buf + pos, len - pos, "0x%04x | ", 1695 offset); 1696 pos += scnprintf(buf + pos, len - pos, "0x%08x ", 1697 le32_to_cpu(desc_index->data[j++])); 1698 pos += scnprintf(buf + pos, len - pos, "0x%08x\n", 1699 le32_to_cpu(desc_index->data[j++])); 1700 offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET; 1701 } 1702 desc_index++; 1703 } 1704 } 1705 1706 static int 1707 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) 1708 { 1709 struct hclge_get_imp_bd_cmd *req; 1710 struct hclge_desc *desc_src; 1711 struct hclge_desc desc; 1712 u32 bd_num; 1713 int ret; 1714 1715 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true); 1716 1717 req = (struct hclge_get_imp_bd_cmd *)desc.data; 1718 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1719 if (ret) { 1720 dev_err(&hdev->pdev->dev, 1721 "failed to get imp statistics bd number, ret = %d\n", 1722 ret); 1723 return ret; 1724 } 1725 1726 bd_num = le32_to_cpu(req->bd_num); 1727 if (!bd_num) { 1728 dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n"); 1729 return -EINVAL; 1730 } 1731 1732 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); 1733 if (!desc_src) 1734 return -ENOMEM; 1735 1736 ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num, 1737 HCLGE_OPC_IMP_STATS_INFO); 1738 if (ret) { 1739 kfree(desc_src); 1740 dev_err(&hdev->pdev->dev, 1741 "failed to get imp statistics, ret = %d\n", ret); 1742 return ret; 1743 } 1744 1745 hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num); 1746 1747 kfree(desc_src); 1748 1749 return 0; 1750 } 1751 1752 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 1753 #define HCLGE_MAX_NCL_CONFIG_LENGTH 16384 1754 1755 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index, 1756 char *buf, int *len, int *pos) 1757 { 1758 #define HCLGE_CMD_DATA_NUM 6 1759 1760 int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index; 1761 int i, j; 1762 1763 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { 1764 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { 1765 if (i == 0 && j == 0) 1766 continue; 1767 1768 *pos += scnprintf(buf + *pos, *len - *pos, 1769 "0x%04x | 0x%08x\n", offset, 1770 le32_to_cpu(desc[i].data[j])); 1771 1772 offset += sizeof(u32); 1773 *index -= sizeof(u32); 1774 1775 if (*index <= 0) 1776 return; 1777 } 1778 } 1779 } 1780 1781 static int 1782 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len) 1783 { 1784 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4) 1785 1786 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; 1787 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; 1788 int index = HCLGE_MAX_NCL_CONFIG_LENGTH; 1789 int pos = 0; 1790 u32 data0; 1791 int ret; 1792 1793 pos += scnprintf(buf + pos, len - pos, "offset | data\n"); 1794 1795 while (index > 0) { 1796 data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index; 1797 if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD) 1798 data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16; 1799 else 1800 data0 |= (u32)index << 16; 1801 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, 1802 HCLGE_OPC_QUERY_NCL_CONFIG); 1803 if (ret) 1804 return ret; 1805 1806 hclge_ncl_config_data_print(desc, &index, buf, &len, &pos); 1807 } 1808 1809 return 0; 1810 } 1811 1812 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len) 1813 { 1814 struct phy_device *phydev = hdev->hw.mac.phydev; 1815 struct hclge_config_mac_mode_cmd *req_app; 1816 struct hclge_common_lb_cmd *req_common; 1817 struct hclge_desc desc; 1818 u8 loopback_en; 1819 int pos = 0; 1820 int ret; 1821 1822 req_app = (struct hclge_config_mac_mode_cmd *)desc.data; 1823 req_common = (struct hclge_common_lb_cmd *)desc.data; 1824 1825 pos += scnprintf(buf + pos, len - pos, "mac id: %u\n", 1826 hdev->hw.mac.mac_id); 1827 1828 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 1829 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1830 if (ret) { 1831 dev_err(&hdev->pdev->dev, 1832 "failed to dump app loopback status, ret = %d\n", ret); 1833 return ret; 1834 } 1835 1836 loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en), 1837 HCLGE_MAC_APP_LP_B); 1838 pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n", 1839 state_str[loopback_en]); 1840 1841 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true); 1842 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1843 if (ret) { 1844 dev_err(&hdev->pdev->dev, 1845 "failed to dump common loopback status, ret = %d\n", 1846 ret); 1847 return ret; 1848 } 1849 1850 loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 1851 pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n", 1852 state_str[loopback_en]); 1853 1854 loopback_en = req_common->enable & 1855 HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0; 1856 pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n", 1857 state_str[loopback_en]); 1858 1859 if (phydev) { 1860 loopback_en = phydev->loopback_enabled; 1861 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", 1862 state_str[loopback_en]); 1863 } else if (hnae3_dev_phy_imp_supported(hdev)) { 1864 loopback_en = req_common->enable & 1865 HCLGE_CMD_GE_PHY_INNER_LOOP_B; 1866 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", 1867 state_str[loopback_en]); 1868 } 1869 1870 return 0; 1871 } 1872 1873 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt 1874 * @hdev: pointer to struct hclge_dev 1875 */ 1876 static int 1877 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len) 1878 { 1879 struct hclge_mac_tnl_stats stats; 1880 unsigned long rem_nsec; 1881 int pos = 0; 1882 1883 pos += scnprintf(buf + pos, len - pos, 1884 "Recently generated mac tnl interruption:\n"); 1885 1886 while (kfifo_get(&hdev->mac_tnl_log, &stats)) { 1887 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); 1888 1889 pos += scnprintf(buf + pos, len - pos, 1890 "[%07lu.%03lu] status = 0x%x\n", 1891 (unsigned long)stats.time, rem_nsec / 1000, 1892 stats.status); 1893 } 1894 1895 return 0; 1896 } 1897 1898 1899 static const struct hclge_dbg_item mac_list_items[] = { 1900 { "FUNC_ID", 2 }, 1901 { "MAC_ADDR", 12 }, 1902 { "STATE", 2 }, 1903 }; 1904 1905 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len, 1906 bool is_unicast) 1907 { 1908 char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN]; 1909 char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN]; 1910 char *result[ARRAY_SIZE(mac_list_items)]; 1911 struct hclge_mac_node *mac_node, *tmp; 1912 struct hclge_vport *vport; 1913 struct list_head *list; 1914 u32 func_id; 1915 int pos = 0; 1916 int i; 1917 1918 for (i = 0; i < ARRAY_SIZE(mac_list_items); i++) 1919 result[i] = &data_str[i][0]; 1920 1921 pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n", 1922 is_unicast ? "UC" : "MC"); 1923 hclge_dbg_fill_content(content, sizeof(content), mac_list_items, 1924 NULL, ARRAY_SIZE(mac_list_items)); 1925 pos += scnprintf(buf + pos, len - pos, "%s", content); 1926 1927 for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) { 1928 vport = &hdev->vport[func_id]; 1929 list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list; 1930 spin_lock_bh(&vport->mac_list_lock); 1931 list_for_each_entry_safe(mac_node, tmp, list, node) { 1932 i = 0; 1933 result[i++] = hclge_dbg_get_func_id_str(str_id, 1934 func_id); 1935 sprintf(result[i++], "%pM", mac_node->mac_addr); 1936 sprintf(result[i++], "%5s", 1937 hclge_mac_state_str[mac_node->state]); 1938 hclge_dbg_fill_content(content, sizeof(content), 1939 mac_list_items, 1940 (const char **)result, 1941 ARRAY_SIZE(mac_list_items)); 1942 pos += scnprintf(buf + pos, len - pos, "%s", content); 1943 } 1944 spin_unlock_bh(&vport->mac_list_lock); 1945 } 1946 } 1947 1948 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len) 1949 { 1950 u8 func_num = pci_num_vf(hdev->pdev) + 1; 1951 struct hclge_vport *vport; 1952 int pos = 0; 1953 u8 i; 1954 1955 pos += scnprintf(buf, len, "num_alloc_vport : %u\n", 1956 hdev->num_alloc_vport); 1957 pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n", 1958 hdev->max_umv_size); 1959 pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n", 1960 hdev->wanted_umv_size); 1961 pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n", 1962 hdev->priv_umv_size); 1963 1964 mutex_lock(&hdev->vport_lock); 1965 pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n", 1966 hdev->share_umv_size); 1967 for (i = 0; i < func_num; i++) { 1968 vport = &hdev->vport[i]; 1969 pos += scnprintf(buf + pos, len - pos, 1970 "vport(%u) used_umv_num : %u\n", 1971 i, vport->used_umv_num); 1972 } 1973 mutex_unlock(&hdev->vport_lock); 1974 1975 return 0; 1976 } 1977 1978 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id, 1979 struct hclge_dbg_vlan_cfg *vlan_cfg) 1980 { 1981 struct hclge_vport_vtag_rx_cfg_cmd *req; 1982 struct hclge_desc desc; 1983 u16 bmap_index; 1984 u8 rx_cfg; 1985 int ret; 1986 1987 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true); 1988 1989 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 1990 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD; 1991 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE; 1992 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE); 1993 1994 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1995 if (ret) { 1996 dev_err(&hdev->pdev->dev, 1997 "failed to get vport%u rxvlan cfg, ret = %d\n", 1998 vf_id, ret); 1999 return ret; 2000 } 2001 2002 rx_cfg = req->vport_vlan_cfg; 2003 vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B); 2004 vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B); 2005 vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B); 2006 vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B); 2007 vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B); 2008 vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B); 2009 2010 return 0; 2011 } 2012 2013 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id, 2014 struct hclge_dbg_vlan_cfg *vlan_cfg) 2015 { 2016 struct hclge_vport_vtag_tx_cfg_cmd *req; 2017 struct hclge_desc desc; 2018 u16 bmap_index; 2019 u8 tx_cfg; 2020 int ret; 2021 2022 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true); 2023 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 2024 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD; 2025 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE; 2026 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE); 2027 2028 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2029 if (ret) { 2030 dev_err(&hdev->pdev->dev, 2031 "failed to get vport%u txvlan cfg, ret = %d\n", 2032 vf_id, ret); 2033 return ret; 2034 } 2035 2036 tx_cfg = req->vport_vlan_cfg; 2037 vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1); 2038 2039 vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B); 2040 vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B); 2041 vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B); 2042 vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B); 2043 vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B); 2044 vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B); 2045 vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B); 2046 2047 return 0; 2048 } 2049 2050 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev, 2051 u8 vlan_type, u8 vf_id, 2052 struct hclge_desc *desc) 2053 { 2054 struct hclge_vlan_filter_ctrl_cmd *req; 2055 int ret; 2056 2057 hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); 2058 req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data; 2059 req->vlan_type = vlan_type; 2060 req->vf_id = vf_id; 2061 2062 ret = hclge_cmd_send(&hdev->hw, desc, 1); 2063 if (ret) 2064 dev_err(&hdev->pdev->dev, 2065 "failed to get vport%u vlan filter config, ret = %d.\n", 2066 vf_id, ret); 2067 2068 return ret; 2069 } 2070 2071 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type, 2072 u8 vf_id, u8 *vlan_fe) 2073 { 2074 struct hclge_vlan_filter_ctrl_cmd *req; 2075 struct hclge_desc desc; 2076 int ret; 2077 2078 ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc); 2079 if (ret) 2080 return ret; 2081 2082 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 2083 *vlan_fe = req->vlan_fe; 2084 2085 return 0; 2086 } 2087 2088 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev, 2089 u8 vf_id, u8 *bypass_en) 2090 { 2091 struct hclge_port_vlan_filter_bypass_cmd *req; 2092 struct hclge_desc desc; 2093 int ret; 2094 2095 if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) 2096 return 0; 2097 2098 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true); 2099 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; 2100 req->vf_id = vf_id; 2101 2102 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2103 if (ret) { 2104 dev_err(&hdev->pdev->dev, 2105 "failed to get vport%u port vlan filter bypass state, ret = %d.\n", 2106 vf_id, ret); 2107 return ret; 2108 } 2109 2110 *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B); 2111 2112 return 0; 2113 } 2114 2115 static const struct hclge_dbg_item vlan_filter_items[] = { 2116 { "FUNC_ID", 2 }, 2117 { "I_VF_VLAN_FILTER", 2 }, 2118 { "E_VF_VLAN_FILTER", 2 }, 2119 { "PORT_VLAN_FILTER_BYPASS", 0 } 2120 }; 2121 2122 static const struct hclge_dbg_item vlan_offload_items[] = { 2123 { "FUNC_ID", 2 }, 2124 { "PVID", 4 }, 2125 { "ACCEPT_TAG1", 2 }, 2126 { "ACCEPT_TAG2", 2 }, 2127 { "ACCEPT_UNTAG1", 2 }, 2128 { "ACCEPT_UNTAG2", 2 }, 2129 { "INSERT_TAG1", 2 }, 2130 { "INSERT_TAG2", 2 }, 2131 { "SHIFT_TAG", 2 }, 2132 { "STRIP_TAG1", 2 }, 2133 { "STRIP_TAG2", 2 }, 2134 { "DROP_TAG1", 2 }, 2135 { "DROP_TAG2", 2 }, 2136 { "PRI_ONLY_TAG1", 2 }, 2137 { "PRI_ONLY_TAG2", 0 } 2138 }; 2139 2140 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf, 2141 int len, int *pos) 2142 { 2143 char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN]; 2144 const char *result[ARRAY_SIZE(vlan_filter_items)]; 2145 u8 i, j, vlan_fe, bypass, ingress, egress; 2146 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ 2147 int ret; 2148 2149 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0, 2150 &vlan_fe); 2151 if (ret) 2152 return ret; 2153 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B; 2154 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0; 2155 2156 *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n", 2157 state_str[ingress]); 2158 *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n", 2159 state_str[egress]); 2160 2161 hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items, 2162 NULL, ARRAY_SIZE(vlan_filter_items)); 2163 *pos += scnprintf(buf + *pos, len - *pos, "%s", content); 2164 2165 for (i = 0; i < func_num; i++) { 2166 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i, 2167 &vlan_fe); 2168 if (ret) 2169 return ret; 2170 2171 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B; 2172 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0; 2173 ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass); 2174 if (ret) 2175 return ret; 2176 j = 0; 2177 result[j++] = hclge_dbg_get_func_id_str(str_id, i); 2178 result[j++] = state_str[ingress]; 2179 result[j++] = state_str[egress]; 2180 result[j++] = 2181 test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, 2182 hdev->ae_dev->caps) ? state_str[bypass] : "NA"; 2183 hclge_dbg_fill_content(content, sizeof(content), 2184 vlan_filter_items, result, 2185 ARRAY_SIZE(vlan_filter_items)); 2186 *pos += scnprintf(buf + *pos, len - *pos, "%s", content); 2187 } 2188 *pos += scnprintf(buf + *pos, len - *pos, "\n"); 2189 2190 return 0; 2191 } 2192 2193 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf, 2194 int len, int *pos) 2195 { 2196 char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN]; 2197 const char *result[ARRAY_SIZE(vlan_offload_items)]; 2198 char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN]; 2199 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ 2200 struct hclge_dbg_vlan_cfg vlan_cfg; 2201 int ret; 2202 u8 i, j; 2203 2204 hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items, 2205 NULL, ARRAY_SIZE(vlan_offload_items)); 2206 *pos += scnprintf(buf + *pos, len - *pos, "%s", content); 2207 2208 for (i = 0; i < func_num; i++) { 2209 ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg); 2210 if (ret) 2211 return ret; 2212 2213 ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg); 2214 if (ret) 2215 return ret; 2216 2217 sprintf(str_pvid, "%u", vlan_cfg.pvid); 2218 j = 0; 2219 result[j++] = hclge_dbg_get_func_id_str(str_id, i); 2220 result[j++] = str_pvid; 2221 result[j++] = state_str[vlan_cfg.accept_tag1]; 2222 result[j++] = state_str[vlan_cfg.accept_tag2]; 2223 result[j++] = state_str[vlan_cfg.accept_untag1]; 2224 result[j++] = state_str[vlan_cfg.accept_untag2]; 2225 result[j++] = state_str[vlan_cfg.insert_tag1]; 2226 result[j++] = state_str[vlan_cfg.insert_tag2]; 2227 result[j++] = state_str[vlan_cfg.shift_tag]; 2228 result[j++] = state_str[vlan_cfg.strip_tag1]; 2229 result[j++] = state_str[vlan_cfg.strip_tag2]; 2230 result[j++] = state_str[vlan_cfg.drop_tag1]; 2231 result[j++] = state_str[vlan_cfg.drop_tag2]; 2232 result[j++] = state_str[vlan_cfg.pri_only1]; 2233 result[j++] = state_str[vlan_cfg.pri_only2]; 2234 2235 hclge_dbg_fill_content(content, sizeof(content), 2236 vlan_offload_items, result, 2237 ARRAY_SIZE(vlan_offload_items)); 2238 *pos += scnprintf(buf + *pos, len - *pos, "%s", content); 2239 } 2240 2241 return 0; 2242 } 2243 2244 static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf, 2245 int len) 2246 { 2247 int pos = 0; 2248 int ret; 2249 2250 ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos); 2251 if (ret) 2252 return ret; 2253 2254 return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos); 2255 } 2256 2257 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len) 2258 { 2259 struct hclge_ptp *ptp = hdev->ptp; 2260 u32 sw_cfg = ptp->ptp_cfg; 2261 unsigned int tx_start; 2262 unsigned int last_rx; 2263 int pos = 0; 2264 u32 hw_cfg; 2265 int ret; 2266 2267 pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n", 2268 ptp->info.name); 2269 pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n", 2270 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ? 2271 "yes" : "no"); 2272 pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n", 2273 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ? 2274 "yes" : "no"); 2275 pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n", 2276 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ? 2277 "yes" : "no"); 2278 2279 last_rx = jiffies_to_msecs(ptp->last_rx); 2280 pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n", 2281 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC); 2282 pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt); 2283 2284 tx_start = jiffies_to_msecs(ptp->tx_start); 2285 pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n", 2286 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC); 2287 pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt); 2288 pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n", 2289 ptp->tx_skipped); 2290 pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n", 2291 ptp->tx_timeout); 2292 pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n", 2293 ptp->last_tx_seqid); 2294 2295 ret = hclge_ptp_cfg_qry(hdev, &hw_cfg); 2296 if (ret) 2297 return ret; 2298 2299 pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n", 2300 sw_cfg, hw_cfg); 2301 2302 pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n", 2303 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter); 2304 2305 return 0; 2306 } 2307 2308 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len) 2309 { 2310 hclge_dbg_dump_mac_list(hdev, buf, len, true); 2311 2312 return 0; 2313 } 2314 2315 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len) 2316 { 2317 hclge_dbg_dump_mac_list(hdev, buf, len, false); 2318 2319 return 0; 2320 } 2321 2322 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = { 2323 { 2324 .cmd = HNAE3_DBG_CMD_TM_NODES, 2325 .dbg_dump = hclge_dbg_dump_tm_nodes, 2326 }, 2327 { 2328 .cmd = HNAE3_DBG_CMD_TM_PRI, 2329 .dbg_dump = hclge_dbg_dump_tm_pri, 2330 }, 2331 { 2332 .cmd = HNAE3_DBG_CMD_TM_QSET, 2333 .dbg_dump = hclge_dbg_dump_tm_qset, 2334 }, 2335 { 2336 .cmd = HNAE3_DBG_CMD_TM_MAP, 2337 .dbg_dump = hclge_dbg_dump_tm_map, 2338 }, 2339 { 2340 .cmd = HNAE3_DBG_CMD_TM_PG, 2341 .dbg_dump = hclge_dbg_dump_tm_pg, 2342 }, 2343 { 2344 .cmd = HNAE3_DBG_CMD_TM_PORT, 2345 .dbg_dump = hclge_dbg_dump_tm_port, 2346 }, 2347 { 2348 .cmd = HNAE3_DBG_CMD_TC_SCH_INFO, 2349 .dbg_dump = hclge_dbg_dump_tc, 2350 }, 2351 { 2352 .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG, 2353 .dbg_dump = hclge_dbg_dump_qos_pause_cfg, 2354 }, 2355 { 2356 .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP, 2357 .dbg_dump = hclge_dbg_dump_qos_pri_map, 2358 }, 2359 { 2360 .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG, 2361 .dbg_dump = hclge_dbg_dump_qos_buf_cfg, 2362 }, 2363 { 2364 .cmd = HNAE3_DBG_CMD_MAC_UC, 2365 .dbg_dump = hclge_dbg_dump_mac_uc, 2366 }, 2367 { 2368 .cmd = HNAE3_DBG_CMD_MAC_MC, 2369 .dbg_dump = hclge_dbg_dump_mac_mc, 2370 }, 2371 { 2372 .cmd = HNAE3_DBG_CMD_MNG_TBL, 2373 .dbg_dump = hclge_dbg_dump_mng_table, 2374 }, 2375 { 2376 .cmd = HNAE3_DBG_CMD_LOOPBACK, 2377 .dbg_dump = hclge_dbg_dump_loopback, 2378 }, 2379 { 2380 .cmd = HNAE3_DBG_CMD_PTP_INFO, 2381 .dbg_dump = hclge_dbg_dump_ptp_info, 2382 }, 2383 { 2384 .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO, 2385 .dbg_dump = hclge_dbg_dump_interrupt, 2386 }, 2387 { 2388 .cmd = HNAE3_DBG_CMD_RESET_INFO, 2389 .dbg_dump = hclge_dbg_dump_rst_info, 2390 }, 2391 { 2392 .cmd = HNAE3_DBG_CMD_IMP_INFO, 2393 .dbg_dump = hclge_dbg_get_imp_stats_info, 2394 }, 2395 { 2396 .cmd = HNAE3_DBG_CMD_NCL_CONFIG, 2397 .dbg_dump = hclge_dbg_dump_ncl_config, 2398 }, 2399 { 2400 .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, 2401 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2402 }, 2403 { 2404 .cmd = HNAE3_DBG_CMD_REG_SSU, 2405 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2406 }, 2407 { 2408 .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, 2409 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2410 }, 2411 { 2412 .cmd = HNAE3_DBG_CMD_REG_RPU, 2413 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2414 }, 2415 { 2416 .cmd = HNAE3_DBG_CMD_REG_NCSI, 2417 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2418 }, 2419 { 2420 .cmd = HNAE3_DBG_CMD_REG_RTC, 2421 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2422 }, 2423 { 2424 .cmd = HNAE3_DBG_CMD_REG_PPP, 2425 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2426 }, 2427 { 2428 .cmd = HNAE3_DBG_CMD_REG_RCB, 2429 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2430 }, 2431 { 2432 .cmd = HNAE3_DBG_CMD_REG_TQP, 2433 .dbg_dump_reg = hclge_dbg_dump_reg_cmd, 2434 }, 2435 { 2436 .cmd = HNAE3_DBG_CMD_REG_MAC, 2437 .dbg_dump = hclge_dbg_dump_mac, 2438 }, 2439 { 2440 .cmd = HNAE3_DBG_CMD_REG_DCB, 2441 .dbg_dump = hclge_dbg_dump_dcb, 2442 }, 2443 { 2444 .cmd = HNAE3_DBG_CMD_FD_TCAM, 2445 .dbg_dump = hclge_dbg_dump_fd_tcam, 2446 }, 2447 { 2448 .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS, 2449 .dbg_dump = hclge_dbg_dump_mac_tnl_status, 2450 }, 2451 { 2452 .cmd = HNAE3_DBG_CMD_SERV_INFO, 2453 .dbg_dump = hclge_dbg_dump_serv_info, 2454 }, 2455 { 2456 .cmd = HNAE3_DBG_CMD_VLAN_CONFIG, 2457 .dbg_dump = hclge_dbg_dump_vlan_config, 2458 }, 2459 { 2460 .cmd = HNAE3_DBG_CMD_FD_COUNTER, 2461 .dbg_dump = hclge_dbg_dump_fd_counter, 2462 }, 2463 { 2464 .cmd = HNAE3_DBG_CMD_UMV_INFO, 2465 .dbg_dump = hclge_dbg_dump_umv_info, 2466 }, 2467 }; 2468 2469 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, 2470 char *buf, int len) 2471 { 2472 struct hclge_vport *vport = hclge_get_vport(handle); 2473 const struct hclge_dbg_func *cmd_func; 2474 struct hclge_dev *hdev = vport->back; 2475 u32 i; 2476 2477 for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) { 2478 if (cmd == hclge_dbg_cmd_func[i].cmd) { 2479 cmd_func = &hclge_dbg_cmd_func[i]; 2480 if (cmd_func->dbg_dump) 2481 return cmd_func->dbg_dump(hdev, buf, len); 2482 else 2483 return cmd_func->dbg_dump_reg(hdev, cmd, buf, 2484 len); 2485 } 2486 } 2487 2488 dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd); 2489 return -EINVAL; 2490 } 2491