1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Copyright (c) 2018-2019 Hisilicon Limited. */ 3 4 #include <linux/device.h> 5 6 #include "hclge_debugfs.h" 7 #include "hclge_cmd.h" 8 #include "hclge_main.h" 9 #include "hclge_tm.h" 10 #include "hnae3.h" 11 12 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset) 13 { 14 struct hclge_desc desc[4]; 15 int ret; 16 17 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true); 18 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 19 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true); 20 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 21 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true); 22 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 23 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true); 24 25 ret = hclge_cmd_send(&hdev->hw, desc, 4); 26 if (ret != HCLGE_CMD_EXEC_SUCCESS) { 27 dev_err(&hdev->pdev->dev, 28 "get dfx bdnum fail, status is %d.\n", ret); 29 return ret; 30 } 31 32 return (int)desc[offset / 6].data[offset % 6]; 33 } 34 35 static int hclge_dbg_cmd_send(struct hclge_dev *hdev, 36 struct hclge_desc *desc_src, 37 int index, int bd_num, 38 enum hclge_opcode_type cmd) 39 { 40 struct hclge_desc *desc = desc_src; 41 int ret, i; 42 43 hclge_cmd_setup_basic_desc(desc, cmd, true); 44 desc->data[0] = cpu_to_le32(index); 45 46 for (i = 1; i < bd_num; i++) { 47 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 48 desc++; 49 hclge_cmd_setup_basic_desc(desc, cmd, true); 50 } 51 52 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); 53 if (ret) { 54 dev_err(&hdev->pdev->dev, 55 "read reg cmd send fail, status is %d.\n", ret); 56 return ret; 57 } 58 59 return ret; 60 } 61 62 static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, 63 struct hclge_dbg_dfx_message *dfx_message, 64 char *cmd_buf, int msg_num, int offset, 65 enum hclge_opcode_type cmd) 66 { 67 struct hclge_desc *desc_src; 68 struct hclge_desc *desc; 69 int bd_num, buf_len; 70 int ret, i; 71 int index; 72 int max; 73 74 ret = kstrtouint(cmd_buf, 10, &index); 75 index = (ret != 0) ? 0 : index; 76 77 bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset); 78 if (bd_num <= 0) 79 return; 80 81 buf_len = sizeof(struct hclge_desc) * bd_num; 82 desc_src = kzalloc(buf_len, GFP_KERNEL); 83 if (!desc_src) { 84 dev_err(&hdev->pdev->dev, "call kzalloc failed\n"); 85 return; 86 } 87 88 desc = desc_src; 89 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd); 90 if (ret != HCLGE_CMD_EXEC_SUCCESS) { 91 kfree(desc_src); 92 return; 93 } 94 95 max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num; 96 97 desc = desc_src; 98 for (i = 0; i < max; i++) { 99 (((i / 6) > 0) && ((i % 6) == 0)) ? desc++ : desc; 100 if (dfx_message->flag) 101 dev_info(&hdev->pdev->dev, "%s: 0x%x\n", 102 dfx_message->message, desc->data[i % 6]); 103 104 dfx_message++; 105 } 106 107 kfree(desc_src); 108 } 109 110 static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf) 111 { 112 struct device *dev = &hdev->pdev->dev; 113 struct hclge_dbg_bitmap_cmd *bitmap; 114 int rq_id, pri_id, qset_id; 115 int port_id, nq_id, pg_id; 116 struct hclge_desc desc[2]; 117 118 int cnt, ret; 119 120 cnt = sscanf(cmd_buf, "%i %i %i %i %i %i", 121 &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id); 122 if (cnt != 6) { 123 dev_err(&hdev->pdev->dev, 124 "dump dcb: bad command parameter, cnt=%d\n", cnt); 125 return; 126 } 127 128 ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, 129 HCLGE_OPC_QSET_DFX_STS); 130 if (ret) 131 return; 132 133 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 134 dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0); 135 dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1); 136 dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2); 137 dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3); 138 139 ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS); 140 if (ret) 141 return; 142 143 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 144 dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0); 145 dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1); 146 dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2); 147 148 ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS); 149 if (ret) 150 return; 151 152 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 153 dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0); 154 dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1); 155 dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2); 156 157 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 158 HCLGE_OPC_PORT_DFX_STS); 159 if (ret) 160 return; 161 162 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 163 dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0); 164 dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1); 165 166 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT); 167 if (ret) 168 return; 169 170 dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]); 171 172 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT); 173 if (ret) 174 return; 175 176 dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]); 177 178 ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS); 179 if (ret) 180 return; 181 182 dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]); 183 dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]); 184 dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]); 185 dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]); 186 dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]); 187 dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]); 188 dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]); 189 190 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 191 HCLGE_OPC_TM_INTERNAL_CNT); 192 if (ret) 193 return; 194 195 dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]); 196 dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]); 197 198 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 199 HCLGE_OPC_TM_INTERNAL_STS_1); 200 if (ret) 201 return; 202 203 dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]); 204 dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]); 205 dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]); 206 dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]); 207 dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]); 208 } 209 210 static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf) 211 { 212 int msg_num; 213 214 if (strncmp(&cmd_buf[9], "bios common", 11) == 0) { 215 msg_num = sizeof(hclge_dbg_bios_common_reg) / 216 sizeof(struct hclge_dbg_dfx_message); 217 hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg, 218 &cmd_buf[21], msg_num, 219 HCLGE_DBG_DFX_BIOS_OFFSET, 220 HCLGE_OPC_DFX_BIOS_COMMON_REG); 221 } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) { 222 msg_num = sizeof(hclge_dbg_ssu_reg_0) / 223 sizeof(struct hclge_dbg_dfx_message); 224 hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0, 225 &cmd_buf[13], msg_num, 226 HCLGE_DBG_DFX_SSU_0_OFFSET, 227 HCLGE_OPC_DFX_SSU_REG_0); 228 229 msg_num = sizeof(hclge_dbg_ssu_reg_1) / 230 sizeof(struct hclge_dbg_dfx_message); 231 hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1, 232 &cmd_buf[13], msg_num, 233 HCLGE_DBG_DFX_SSU_1_OFFSET, 234 HCLGE_OPC_DFX_SSU_REG_1); 235 236 msg_num = sizeof(hclge_dbg_ssu_reg_2) / 237 sizeof(struct hclge_dbg_dfx_message); 238 hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2, 239 &cmd_buf[13], msg_num, 240 HCLGE_DBG_DFX_SSU_2_OFFSET, 241 HCLGE_OPC_DFX_SSU_REG_2); 242 } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) { 243 msg_num = sizeof(hclge_dbg_igu_egu_reg) / 244 sizeof(struct hclge_dbg_dfx_message); 245 hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg, 246 &cmd_buf[17], msg_num, 247 HCLGE_DBG_DFX_IGU_OFFSET, 248 HCLGE_OPC_DFX_IGU_EGU_REG); 249 } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) { 250 msg_num = sizeof(hclge_dbg_rpu_reg_0) / 251 sizeof(struct hclge_dbg_dfx_message); 252 hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0, 253 &cmd_buf[13], msg_num, 254 HCLGE_DBG_DFX_RPU_0_OFFSET, 255 HCLGE_OPC_DFX_RPU_REG_0); 256 257 msg_num = sizeof(hclge_dbg_rpu_reg_1) / 258 sizeof(struct hclge_dbg_dfx_message); 259 hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1, 260 &cmd_buf[13], msg_num, 261 HCLGE_DBG_DFX_RPU_1_OFFSET, 262 HCLGE_OPC_DFX_RPU_REG_1); 263 } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) { 264 msg_num = sizeof(hclge_dbg_ncsi_reg) / 265 sizeof(struct hclge_dbg_dfx_message); 266 hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg, 267 &cmd_buf[14], msg_num, 268 HCLGE_DBG_DFX_NCSI_OFFSET, 269 HCLGE_OPC_DFX_NCSI_REG); 270 } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) { 271 msg_num = sizeof(hclge_dbg_rtc_reg) / 272 sizeof(struct hclge_dbg_dfx_message); 273 hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg, 274 &cmd_buf[13], msg_num, 275 HCLGE_DBG_DFX_RTC_OFFSET, 276 HCLGE_OPC_DFX_RTC_REG); 277 } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) { 278 msg_num = sizeof(hclge_dbg_ppp_reg) / 279 sizeof(struct hclge_dbg_dfx_message); 280 hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg, 281 &cmd_buf[13], msg_num, 282 HCLGE_DBG_DFX_PPP_OFFSET, 283 HCLGE_OPC_DFX_PPP_REG); 284 } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) { 285 msg_num = sizeof(hclge_dbg_rcb_reg) / 286 sizeof(struct hclge_dbg_dfx_message); 287 hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg, 288 &cmd_buf[13], msg_num, 289 HCLGE_DBG_DFX_RCB_OFFSET, 290 HCLGE_OPC_DFX_RCB_REG); 291 } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) { 292 msg_num = sizeof(hclge_dbg_tqp_reg) / 293 sizeof(struct hclge_dbg_dfx_message); 294 hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg, 295 &cmd_buf[13], msg_num, 296 HCLGE_DBG_DFX_TQP_OFFSET, 297 HCLGE_OPC_DFX_TQP_REG); 298 } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) { 299 hclge_dbg_dump_dcb(hdev, &cmd_buf[13]); 300 } else { 301 dev_info(&hdev->pdev->dev, "unknown command\n"); 302 return; 303 } 304 } 305 306 static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index, 307 char *title_buf, char *true_buf, 308 char *false_buf) 309 { 310 if (flag) 311 dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index, 312 true_buf); 313 else 314 dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index, 315 false_buf); 316 } 317 318 static void hclge_dbg_dump_tc(struct hclge_dev *hdev) 319 { 320 struct hclge_ets_tc_weight_cmd *ets_weight; 321 struct hclge_desc desc; 322 int i, ret; 323 324 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); 325 326 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 327 if (ret) { 328 dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret); 329 return; 330 } 331 332 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; 333 334 dev_info(&hdev->pdev->dev, "dump tc\n"); 335 dev_info(&hdev->pdev->dev, "weight_offset: %u\n", 336 ets_weight->weight_offset); 337 338 for (i = 0; i < HNAE3_MAX_TC; i++) 339 hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i, 340 "tc", "no sp mode", "sp mode"); 341 } 342 343 static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) 344 { 345 struct hclge_port_shapping_cmd *port_shap_cfg_cmd; 346 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 347 struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd; 348 enum hclge_opcode_type cmd; 349 struct hclge_desc desc; 350 int ret; 351 352 cmd = HCLGE_OPC_TM_PG_C_SHAPPING; 353 hclge_cmd_setup_basic_desc(&desc, cmd, true); 354 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 355 if (ret) 356 goto err_tm_pg_cmd_send; 357 358 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 359 dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 360 dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n", 361 pg_shap_cfg_cmd->pg_shapping_para); 362 363 cmd = HCLGE_OPC_TM_PG_P_SHAPPING; 364 hclge_cmd_setup_basic_desc(&desc, cmd, true); 365 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 366 if (ret) 367 goto err_tm_pg_cmd_send; 368 369 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 370 dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 371 dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", 372 pg_shap_cfg_cmd->pg_shapping_para); 373 374 cmd = HCLGE_OPC_TM_PORT_SHAPPING; 375 hclge_cmd_setup_basic_desc(&desc, cmd, true); 376 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 377 if (ret) 378 goto err_tm_pg_cmd_send; 379 380 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; 381 dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", 382 port_shap_cfg_cmd->port_shapping_para); 383 384 cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; 385 hclge_cmd_setup_basic_desc(&desc, cmd, true); 386 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 387 if (ret) 388 goto err_tm_pg_cmd_send; 389 390 dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]); 391 392 cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG; 393 hclge_cmd_setup_basic_desc(&desc, cmd, true); 394 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 395 if (ret) 396 goto err_tm_pg_cmd_send; 397 398 dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]); 399 400 cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; 401 hclge_cmd_setup_basic_desc(&desc, cmd, true); 402 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 403 if (ret) 404 goto err_tm_pg_cmd_send; 405 406 dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]); 407 408 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 409 hclge_cmd_setup_basic_desc(&desc, cmd, true); 410 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 411 if (ret) 412 goto err_tm_pg_cmd_send; 413 414 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 415 dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n", 416 bp_to_qs_map_cmd->tc_id); 417 dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n", 418 bp_to_qs_map_cmd->qs_group_id); 419 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", 420 bp_to_qs_map_cmd->qs_bit_map); 421 return; 422 423 err_tm_pg_cmd_send: 424 dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n", 425 cmd, ret); 426 } 427 428 static void hclge_dbg_dump_tm(struct hclge_dev *hdev) 429 { 430 struct hclge_priority_weight_cmd *priority_weight; 431 struct hclge_pg_to_pri_link_cmd *pg_to_pri_map; 432 struct hclge_qs_to_pri_link_cmd *qs_to_pri_map; 433 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 434 struct hclge_pri_shapping_cmd *shap_cfg_cmd; 435 struct hclge_pg_weight_cmd *pg_weight; 436 struct hclge_qs_weight_cmd *qs_weight; 437 enum hclge_opcode_type cmd; 438 struct hclge_desc desc; 439 int ret; 440 441 cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK; 442 hclge_cmd_setup_basic_desc(&desc, cmd, true); 443 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 444 if (ret) 445 goto err_tm_cmd_send; 446 447 pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data; 448 dev_info(&hdev->pdev->dev, "dump tm\n"); 449 dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n", 450 pg_to_pri_map->pg_id); 451 dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n", 452 pg_to_pri_map->pri_bit_map); 453 454 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 455 hclge_cmd_setup_basic_desc(&desc, cmd, true); 456 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 457 if (ret) 458 goto err_tm_cmd_send; 459 460 qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 461 dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n", 462 qs_to_pri_map->qs_id); 463 dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n", 464 qs_to_pri_map->priority); 465 dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n", 466 qs_to_pri_map->link_vld); 467 468 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 469 hclge_cmd_setup_basic_desc(&desc, cmd, true); 470 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 471 if (ret) 472 goto err_tm_cmd_send; 473 474 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 475 dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id); 476 dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n", 477 nq_to_qs_map->qset_id); 478 479 cmd = HCLGE_OPC_TM_PG_WEIGHT; 480 hclge_cmd_setup_basic_desc(&desc, cmd, true); 481 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 482 if (ret) 483 goto err_tm_cmd_send; 484 485 pg_weight = (struct hclge_pg_weight_cmd *)desc.data; 486 dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id); 487 dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr); 488 489 cmd = HCLGE_OPC_TM_QS_WEIGHT; 490 hclge_cmd_setup_basic_desc(&desc, cmd, true); 491 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 492 if (ret) 493 goto err_tm_cmd_send; 494 495 qs_weight = (struct hclge_qs_weight_cmd *)desc.data; 496 dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id); 497 dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr); 498 499 cmd = HCLGE_OPC_TM_PRI_WEIGHT; 500 hclge_cmd_setup_basic_desc(&desc, cmd, true); 501 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 502 if (ret) 503 goto err_tm_cmd_send; 504 505 priority_weight = (struct hclge_priority_weight_cmd *)desc.data; 506 dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id); 507 dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr); 508 509 cmd = HCLGE_OPC_TM_PRI_C_SHAPPING; 510 hclge_cmd_setup_basic_desc(&desc, cmd, true); 511 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 512 if (ret) 513 goto err_tm_cmd_send; 514 515 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 516 dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); 517 dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", 518 shap_cfg_cmd->pri_shapping_para); 519 520 cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; 521 hclge_cmd_setup_basic_desc(&desc, cmd, true); 522 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 523 if (ret) 524 goto err_tm_cmd_send; 525 526 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 527 dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); 528 dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", 529 shap_cfg_cmd->pri_shapping_para); 530 531 hclge_dbg_dump_tm_pg(hdev); 532 533 return; 534 535 err_tm_cmd_send: 536 dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n", 537 cmd, ret); 538 } 539 540 static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf) 541 { 542 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 543 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 544 struct hclge_qs_to_pri_link_cmd *map; 545 struct hclge_tqp_tx_queue_tc_cmd *tc; 546 enum hclge_opcode_type cmd; 547 struct hclge_desc desc; 548 int queue_id, group_id; 549 u32 qset_maping[32]; 550 int tc_id, qset_id; 551 int pri_id, ret; 552 u32 i; 553 554 ret = kstrtouint(&cmd_buf[12], 10, &queue_id); 555 queue_id = (ret != 0) ? 0 : queue_id; 556 557 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 558 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 559 hclge_cmd_setup_basic_desc(&desc, cmd, true); 560 nq_to_qs_map->nq_id = cpu_to_le16(queue_id); 561 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 562 if (ret) 563 goto err_tm_map_cmd_send; 564 qset_id = nq_to_qs_map->qset_id & 0x3FF; 565 566 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 567 map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 568 hclge_cmd_setup_basic_desc(&desc, cmd, true); 569 map->qs_id = cpu_to_le16(qset_id); 570 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 571 if (ret) 572 goto err_tm_map_cmd_send; 573 pri_id = map->priority; 574 575 cmd = HCLGE_OPC_TQP_TX_QUEUE_TC; 576 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; 577 hclge_cmd_setup_basic_desc(&desc, cmd, true); 578 tc->queue_id = cpu_to_le16(queue_id); 579 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 580 if (ret) 581 goto err_tm_map_cmd_send; 582 tc_id = tc->tc_id & 0x7; 583 584 dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n"); 585 dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n", 586 queue_id, qset_id, pri_id, tc_id); 587 588 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 589 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 590 for (group_id = 0; group_id < 32; group_id++) { 591 hclge_cmd_setup_basic_desc(&desc, cmd, true); 592 bp_to_qs_map_cmd->tc_id = tc_id; 593 bp_to_qs_map_cmd->qs_group_id = group_id; 594 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 595 if (ret) 596 goto err_tm_map_cmd_send; 597 598 qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map; 599 } 600 601 dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n"); 602 603 i = 0; 604 for (group_id = 0; group_id < 4; group_id++) { 605 dev_info(&hdev->pdev->dev, 606 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", 607 group_id * 256, qset_maping[(u32)(i + 7)], 608 qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)], 609 qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)], 610 qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)], 611 qset_maping[i]); 612 i += 8; 613 } 614 615 return; 616 617 err_tm_map_cmd_send: 618 dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n", 619 cmd, ret); 620 } 621 622 static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) 623 { 624 struct hclge_cfg_pause_param_cmd *pause_param; 625 struct hclge_desc desc; 626 int ret; 627 628 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); 629 630 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 631 if (ret) { 632 dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n", 633 ret); 634 return; 635 } 636 637 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; 638 dev_info(&hdev->pdev->dev, "dump qos pause cfg\n"); 639 dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n", 640 pause_param->pause_trans_gap); 641 dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n", 642 pause_param->pause_trans_time); 643 } 644 645 static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) 646 { 647 struct hclge_qos_pri_map_cmd *pri_map; 648 struct hclge_desc desc; 649 int ret; 650 651 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); 652 653 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 654 if (ret) { 655 dev_err(&hdev->pdev->dev, 656 "dump qos pri map fail, status is %d.\n", ret); 657 return; 658 } 659 660 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; 661 dev_info(&hdev->pdev->dev, "dump qos pri map\n"); 662 dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri); 663 dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc); 664 dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc); 665 dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc); 666 dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc); 667 dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc); 668 dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc); 669 dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc); 670 dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc); 671 } 672 673 static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) 674 { 675 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; 676 struct hclge_rx_priv_buff_cmd *rx_buf_cmd; 677 struct hclge_rx_priv_wl_buf *rx_priv_wl; 678 struct hclge_rx_com_wl *rx_packet_cnt; 679 struct hclge_rx_com_thrd *rx_com_thrd; 680 struct hclge_rx_com_wl *rx_com_wl; 681 enum hclge_opcode_type cmd; 682 struct hclge_desc desc[2]; 683 int i, ret; 684 685 cmd = HCLGE_OPC_TX_BUFF_ALLOC; 686 hclge_cmd_setup_basic_desc(desc, cmd, true); 687 ret = hclge_cmd_send(&hdev->hw, desc, 1); 688 if (ret) 689 goto err_qos_cmd_send; 690 691 dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); 692 693 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; 694 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 695 dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, 696 tx_buf_cmd->tx_pkt_buff[i]); 697 698 cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; 699 hclge_cmd_setup_basic_desc(desc, cmd, true); 700 ret = hclge_cmd_send(&hdev->hw, desc, 1); 701 if (ret) 702 goto err_qos_cmd_send; 703 704 dev_info(&hdev->pdev->dev, "\n"); 705 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; 706 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 707 dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, 708 rx_buf_cmd->buf_num[i]); 709 710 dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", 711 rx_buf_cmd->shared_buf); 712 713 cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; 714 hclge_cmd_setup_basic_desc(&desc[0], cmd, true); 715 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 716 hclge_cmd_setup_basic_desc(&desc[1], cmd, true); 717 ret = hclge_cmd_send(&hdev->hw, desc, 2); 718 if (ret) 719 goto err_qos_cmd_send; 720 721 dev_info(&hdev->pdev->dev, "\n"); 722 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; 723 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 724 dev_info(&hdev->pdev->dev, 725 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, 726 rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); 727 728 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; 729 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 730 dev_info(&hdev->pdev->dev, 731 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, 732 rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); 733 734 cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; 735 hclge_cmd_setup_basic_desc(&desc[0], cmd, true); 736 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 737 hclge_cmd_setup_basic_desc(&desc[1], cmd, true); 738 ret = hclge_cmd_send(&hdev->hw, desc, 2); 739 if (ret) 740 goto err_qos_cmd_send; 741 742 dev_info(&hdev->pdev->dev, "\n"); 743 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; 744 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 745 dev_info(&hdev->pdev->dev, 746 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, 747 rx_com_thrd->com_thrd[i].high, 748 rx_com_thrd->com_thrd[i].low); 749 750 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; 751 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 752 dev_info(&hdev->pdev->dev, 753 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, 754 rx_com_thrd->com_thrd[i].high, 755 rx_com_thrd->com_thrd[i].low); 756 757 cmd = HCLGE_OPC_RX_COM_WL_ALLOC; 758 hclge_cmd_setup_basic_desc(desc, cmd, true); 759 ret = hclge_cmd_send(&hdev->hw, desc, 1); 760 if (ret) 761 goto err_qos_cmd_send; 762 763 rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; 764 dev_info(&hdev->pdev->dev, "\n"); 765 dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", 766 rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); 767 768 cmd = HCLGE_OPC_RX_GBL_PKT_CNT; 769 hclge_cmd_setup_basic_desc(desc, cmd, true); 770 ret = hclge_cmd_send(&hdev->hw, desc, 1); 771 if (ret) 772 goto err_qos_cmd_send; 773 774 rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; 775 dev_info(&hdev->pdev->dev, 776 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", 777 rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); 778 779 return; 780 781 err_qos_cmd_send: 782 dev_err(&hdev->pdev->dev, 783 "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret); 784 } 785 786 static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) 787 { 788 struct hclge_mac_ethertype_idx_rd_cmd *req0; 789 char printf_buf[HCLGE_DBG_BUF_LEN]; 790 struct hclge_desc desc; 791 int ret, i; 792 793 dev_info(&hdev->pdev->dev, "mng tab:\n"); 794 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 795 strncat(printf_buf, 796 "entry|mac_addr |mask|ether|mask|vlan|mask", 797 HCLGE_DBG_BUF_LEN - 1); 798 strncat(printf_buf + strlen(printf_buf), 799 "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n", 800 HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1); 801 802 dev_info(&hdev->pdev->dev, "%s", printf_buf); 803 804 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { 805 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, 806 true); 807 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; 808 req0->index = cpu_to_le16(i); 809 810 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 811 if (ret) { 812 dev_err(&hdev->pdev->dev, 813 "call hclge_cmd_send fail, ret = %d\n", ret); 814 return; 815 } 816 817 if (!req0->resp_code) 818 continue; 819 820 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 821 snprintf(printf_buf, HCLGE_DBG_BUF_LEN, 822 "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", 823 req0->index, req0->mac_add[0], req0->mac_add[1], 824 req0->mac_add[2], req0->mac_add[3], req0->mac_add[4], 825 req0->mac_add[5]); 826 827 snprintf(printf_buf + strlen(printf_buf), 828 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 829 "%x |%04x |%x |%04x|%x |%02x |%02x |", 830 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), 831 req0->ethter_type, 832 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), 833 req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG, 834 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), 835 req0->i_port_bitmap, req0->i_port_direction); 836 837 snprintf(printf_buf + strlen(printf_buf), 838 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 839 "%d |%d |%02d |%04d|%x\n", 840 !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B), 841 req0->egress_port & HCLGE_DBG_MNG_PF_ID, 842 (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, 843 req0->egress_queue, 844 !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B)); 845 846 dev_info(&hdev->pdev->dev, "%s", printf_buf); 847 } 848 } 849 850 static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, 851 bool sel_x, u32 loc) 852 { 853 struct hclge_fd_tcam_config_1_cmd *req1; 854 struct hclge_fd_tcam_config_2_cmd *req2; 855 struct hclge_fd_tcam_config_3_cmd *req3; 856 struct hclge_desc desc[3]; 857 int ret, i; 858 u32 *req; 859 860 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); 861 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 862 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); 863 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 864 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); 865 866 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 867 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 868 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 869 870 req1->stage = stage; 871 req1->xy_sel = sel_x ? 1 : 0; 872 req1->index = cpu_to_le32(loc); 873 874 ret = hclge_cmd_send(&hdev->hw, desc, 3); 875 if (ret) 876 return; 877 878 dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", 879 sel_x ? "x" : "y", loc); 880 881 req = (u32 *)req1->tcam_data; 882 for (i = 0; i < 2; i++) 883 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 884 885 req = (u32 *)req2->tcam_data; 886 for (i = 0; i < 6; i++) 887 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 888 889 req = (u32 *)req3->tcam_data; 890 for (i = 0; i < 5; i++) 891 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 892 } 893 894 static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) 895 { 896 u32 i; 897 898 for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) { 899 hclge_dbg_fd_tcam_read(hdev, 0, true, i); 900 hclge_dbg_fd_tcam_read(hdev, 0, false, i); 901 } 902 } 903 904 int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) 905 { 906 struct hclge_vport *vport = hclge_get_vport(handle); 907 struct hclge_dev *hdev = vport->back; 908 909 if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) { 910 hclge_dbg_fd_tcam(hdev); 911 } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { 912 hclge_dbg_dump_tc(hdev); 913 } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) { 914 hclge_dbg_dump_tm_map(hdev, cmd_buf); 915 } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { 916 hclge_dbg_dump_tm(hdev); 917 } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) { 918 hclge_dbg_dump_qos_pause_cfg(hdev); 919 } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) { 920 hclge_dbg_dump_qos_pri_map(hdev); 921 } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) { 922 hclge_dbg_dump_qos_buf_cfg(hdev); 923 } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { 924 hclge_dbg_dump_mng_table(hdev); 925 } else if (strncmp(cmd_buf, "dump reg", 8) == 0) { 926 hclge_dbg_dump_reg_cmd(hdev, cmd_buf); 927 } else { 928 dev_info(&hdev->pdev->dev, "unknown command\n"); 929 return -EINVAL; 930 } 931 932 return 0; 933 } 934