// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */

#include <linux/device.h>

#include "hclge_debugfs.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
	{ .reg_type = "bios common",
	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
	{ .reg_type = "igu egu",
	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
	{ .reg_type = "ncsi",
	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
	{ .reg_type = "rtc",
	  .dfx_msg = &hclge_dbg_rtc_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
	{ .reg_type = "ppp",
	  .dfx_msg = &hclge_dbg_ppp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
	{ .reg_type = "rcb",
	  .dfx_msg = &hclge_dbg_rcb_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
	{ .reg_type = "tqp",
	  .dfx_msg = &hclge_dbg_tqp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
};

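/* hclge_dbg_get_dfx_bd_num: query firmware for the buffer descriptor (BD)
 * number needed to read the DFX register block at @offset
 * @hdev: pointer to struct hclge_dev
 * @offset: index of the block in the BD number query result
 * Return: BD number on success, negative errno on failure
 */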
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int entries_per_desc;
	int index;
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get dfx bd num fail, ret = %d\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	index = offset % entries_per_desc;
	return le32_to_cpu(desc[offset / entries_per_desc].data[index]);
}

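/* hclge_dbg_cmd_send: read a debug query result from firmware
 * @desc_src: caller-allocated array of at least @bd_num descriptors
 * @index: value written to data[0] of the first descriptor, typically a
 *	   block offset or object id
 * @bd_num: number of descriptors chained via HCLGE_CMD_FLAG_NEXT
 * @cmd: opcode set in every descriptor of the chain
 */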
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
			      struct hclge_desc *desc_src,
			      int index, int bd_num,
			      enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int ret, i;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	desc->data[0] = cpu_to_le32(index);

	for (i = 1; i < bd_num; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
	return ret;
}

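/* hclge_dbg_dump_reg_common: dump one DFX register block to the kernel log
 * @cmd_buf: remainder of the user command; may carry an optional index
 *	     after the register type name (defaults to 0 if absent or
 *	     unparsable)
 */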
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
				      const struct hclge_dbg_reg_type_info *reg_info,
				      const char *cmd_buf)
{
#define IDX_OFFSET	1

	const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
	struct hclge_desc *desc_src;
	struct hclge_desc *desc;
	int entries_per_desc;
	int bd_num, buf_len;
	u32 index = 0;
	int min_num;
	int ret, i;

	if (*s) {
		ret = kstrtouint(s, 0, &index);
		index = (ret != 0) ? 0 : index;
	}

	bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
	if (bd_num <= 0) {
		dev_err(&hdev->pdev->dev,
			"get dfx reg bd num failed, offset = %d, bd_num = %d\n",
			reg_msg->offset, bd_num);
		return;
	}

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return;

	desc = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
	if (ret) {
		kfree(desc_src);
		return;
	}

	entries_per_desc = ARRAY_SIZE(desc->data);
	min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);

	desc = desc_src;
	for (i = 0; i < min_num; i++) {
		if (i > 0 && (i % entries_per_desc) == 0)
			desc++;
		if (dfx_message->flag)
			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
				 dfx_message->message,
				 le32_to_cpu(desc->data[i % entries_per_desc]));

		dfx_message++;
	}

	kfree(desc_src);
}

static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac enable status, ret = %d\n", ret);
		return;
	}

	req = (struct hclge_config_mac_mode_cmd *)desc.data;
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);

	dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
	dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
	dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
	dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
	dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
	dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
	dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
	dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
	dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
	dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
	dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
}

static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac frame size, ret = %d\n", ret);
		return;
	}

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
		 le16_to_cpu(req->max_frm_size));
	dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
}

static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
{
#define HCLGE_MAC_SPEED_SHIFT	0
#define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
#define HCLGE_MAC_DUPLEX_SHIFT	7

	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac speed duplex, ret = %d\n", ret);
		return;
	}

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "speed: %#lx\n",
		 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
				 HCLGE_MAC_SPEED_SHIFT));
	dev_info(&hdev->pdev->dev, "duplex: %#x\n",
		 hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
}

static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
{
	hclge_dbg_dump_mac_enable_status(hdev);

	hclge_dbg_dump_mac_frame_size(hdev);

	hclge_dbg_dump_mac_speed_duplex(hdev);
}

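/* hclge_dbg_dump_dcb: dump DCB DFX status to the kernel log
 * @cmd_buf: six ids in the order "port pri pg rq nq qset"
 */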
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_dbg_bitmap_cmd *bitmap;
	enum hclge_opcode_type cmd;
	int rq_id, pri_id, qset_id;
	int port_id, nq_id, pg_id;
	struct hclge_desc desc[2];
	int cnt, ret;

	cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
		     &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
	if (cnt != 6) {
		dev_err(&hdev->pdev->dev,
			"dump dcb: bad command parameter, cnt=%d\n", cnt);
		return;
	}

	cmd = HCLGE_OPC_QSET_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
	dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
	dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);

	cmd = HCLGE_OPC_PRI_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);

	cmd = HCLGE_OPC_PG_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);

	cmd = HCLGE_OPC_PORT_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);

	cmd = HCLGE_OPC_SCH_NQ_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));

	cmd = HCLGE_OPC_SCH_RQ_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));

	cmd = HCLGE_OPC_TM_INTERNAL_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
	dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
		 le32_to_cpu(desc[0].data[3]));
	dev_info(dev, "tx_private_waterline: 0x%x\n",
		 le32_to_cpu(desc[0].data[4]));
	dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
	dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
	dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));

	cmd = HCLGE_OPC_TM_INTERNAL_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));

	cmd = HCLGE_OPC_TM_INTERNAL_STS_1;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
	dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
	dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
		 le32_to_cpu(desc[0].data[4]));
	dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
		 le32_to_cpu(desc[0].data[5]));
	return;

err_dcb_cmd_send:
	dev_err(&hdev->pdev->dev,
		"failed to dump dcb dfx, cmd = %#x, ret = %d\n",
		cmd, ret);
}

static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
	const struct hclge_dbg_reg_type_info *reg_info;
	bool has_dump = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
		reg_info = &hclge_dbg_reg_info[i];
		if (!strncmp(cmd_buf, reg_info->reg_type,
			     strlen(reg_info->reg_type))) {
			hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
			has_dump = true;
		}
	}

	if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
		hclge_dbg_dump_mac(hdev);
		has_dump = true;
	}

	if (strncmp(cmd_buf, "dcb", 3) == 0) {
		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
		has_dump = true;
	}

	if (!has_dump)
		dev_info(&hdev->pdev->dev, "unknown command\n");
}

static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index)
{
	if (flag)
		dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n",
			 index, hdev->tm_info.pg_info[0].tc_dwrr[index]);
	else
		dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index);
}

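/* hclge_dbg_dump_tc: dump the ETS weight of every TC; a TC whose weight
 * reads back as zero is reported as strict priority (sp) mode
 */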
static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
{
	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i, ret;

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tc\n");
		return;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
		return;
	}

	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
		 hdev->tm_info.num_tc);
	dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
		 ets_weight->weight_offset);

	for (i = 0; i < HNAE3_MAX_TC; i++)
		hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i);
}

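/* hclge_dbg_dump_tm_pg: dump priority group (PG) and port level shaping
 * parameters, the PG/PRI/QS scheduling mode registers, and the BP to qset
 * mapping on devices with DCB support
 */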
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));

	cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
	dev_info(&hdev->pdev->dev, "PG_P flag: %#x\n", pg_shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PG_P pg_rate: %u(Mbps)\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_rate));

	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
		 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
	dev_info(&hdev->pdev->dev, "PORT flag: %#x\n", port_shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PORT port_rate: %u(Mbps)\n",
		 le32_to_cpu(port_shap_cfg_cmd->port_rate));

	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");
		return;
	}

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
		 bp_to_qs_map_cmd->tc_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
		 bp_to_qs_map_cmd->qs_group_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
		 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
	return;

err_tm_pg_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
		cmd, ret);
}

static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
	struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_pg_weight_cmd *pg_weight;
	struct hclge_qs_weight_cmd *qs_weight;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump tm\n");
	dev_info(&hdev->pdev->dev, "PG_TO_PRI pg_id: %u\n",
		 pg_to_pri_map->pg_id);
	dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
		 pg_to_pri_map->pri_bit_map);

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
		 le16_to_cpu(qs_to_pri_map->qs_id));
	dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
		 qs_to_pri_map->priority);
	dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
		 qs_to_pri_map->link_vld);

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
		 le16_to_cpu(nq_to_qs_map->nq_id));
	dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
		 le16_to_cpu(nq_to_qs_map->qset_id));

	cmd = HCLGE_OPC_TM_PG_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
	dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);

	cmd = HCLGE_OPC_TM_QS_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
		 le16_to_cpu(qs_weight->qs_id));
	dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
	dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
	dev_info(&hdev->pdev->dev, "PRI_C flag: %#x\n", shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PRI_C pri_rate: %u(Mbps)\n",
		 le32_to_cpu(shap_cfg_cmd->pri_rate));

	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
	dev_info(&hdev->pdev->dev, "PRI_P flag: %#x\n", shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PRI_P pri_rate: %u(Mbps)\n",
		 le32_to_cpu(shap_cfg_cmd->pri_rate));

	hclge_dbg_dump_tm_pg(hdev);

	return;

err_tm_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
		cmd, ret);
}

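/* hclge_dbg_dump_tm_map: dump the queue -> qset -> priority -> TC mapping
 * of one NIC queue, plus the per-TC back pressure qset bitmaps on devices
 * with DCB support
 * @cmd_buf: queue id, defaults to 0 if absent or unparsable
 */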
static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
				  const char *cmd_buf)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_tqp_tx_queue_tc_cmd *tc;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	u32 queue_id;
	int group_id;
	int tc_id, qset_id;
	int pri_id, ret;
	u16 qs_id_l;
	u16 qs_id_h;
	u8 grp_num;
	u32 i;

	ret = kstrtouint(cmd_buf, 0, &queue_id);
	queue_id = (ret != 0) ? 0 : queue_id;

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	qset_id = le16_to_cpu(nq_to_qs_map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *            | qs_id_h | vld | qs_id_l |
	 * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
	 *             \         \   /         /
	 *              \         \ /         /
	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	qset_id = 0;
	hnae3_set_field(qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	pri_id = map->priority;

	cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	tc->queue_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	tc_id = tc->tc_id & 0x7;

	dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
	dev_info(&hdev->pdev->dev, "%04u     | %04d    | %02d     | %02d\n",
		 queue_id, qset_id, pri_id, tc_id);

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");
		return;
	}

	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	for (group_id = 0; group_id < grp_num; group_id++) {
		hclge_cmd_setup_basic_desc(&desc, cmd, true);
		bp_to_qs_map_cmd->tc_id = tc_id;
		bp_to_qs_map_cmd->qs_group_id = group_id;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			goto err_tm_map_cmd_send;

		qset_mapping[group_id] =
			le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
	}

	dev_info(&hdev->pdev->dev, "index | tm bp qset mapping:\n");

	i = 0;
	for (group_id = 0; group_id < grp_num / 8; group_id++) {
		dev_info(&hdev->pdev->dev,
			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
			 group_id * 256, qset_mapping[(u32)(i + 7)],
			 qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
			 qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
			 qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
			 qset_mapping[i]);
		i += 8;
	}

	return;

err_tm_map_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
		cmd, ret);
}

static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pause cfg fail, ret = %d\n", ret);
		return;
	}

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
	dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
		 pause_param->pause_trans_gap);
	dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
		 le16_to_cpu(pause_param->pause_trans_time));
}

static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
{
	struct hclge_qos_pri_map_cmd *pri_map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pri map fail, ret = %d\n", ret);
		return;
	}

	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pri map\n");
	dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
	dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
	dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
	dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
	dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
	dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
	dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
	dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}

static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
{
	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
	struct hclge_rx_priv_wl_buf *rx_priv_wl;
	struct hclge_rx_com_wl *rx_packet_cnt;
	struct hclge_rx_com_thrd *rx_com_thrd;
	struct hclge_rx_com_wl *rx_com_wl;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc[2];
	int i, ret;

	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");

	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
			 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));

	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
			 le16_to_cpu(rx_buf_cmd->buf_num[i]));

	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
		 le16_to_cpu(rx_buf_cmd->shared_buf));

	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev, "\n");
	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
		 le16_to_cpu(rx_com_wl->com_wl.high),
		 le16_to_cpu(rx_com_wl->com_wl.low));

	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev,
		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
		 le16_to_cpu(rx_packet_cnt->com_wl.high),
		 le16_to_cpu(rx_packet_cnt->com_wl.low));
	dev_info(&hdev->pdev->dev, "\n");

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports rx priv wl\n");
		return;
	}
	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));

	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
	return;

err_qos_cmd_send:
	dev_err(&hdev->pdev->dev,
		"dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
}

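/* hclge_dbg_dump_mng_table: dump every valid entry of the management MAC
 * and ethertype table, one formatted row per entry
 */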
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
{
	struct hclge_mac_ethertype_idx_rd_cmd *req0;
	char printf_buf[HCLGE_DBG_BUF_LEN];
	struct hclge_desc desc;
	u32 msg_egress_port;
	int ret, i;

	dev_info(&hdev->pdev->dev, "mng tab:\n");
	memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
	strncat(printf_buf,
		"entry|mac_addr         |mask|ether|mask|vlan|mask",
		HCLGE_DBG_BUF_LEN - 1);
	strncat(printf_buf + strlen(printf_buf),
		"|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
		HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);

	dev_info(&hdev->pdev->dev, "%s", printf_buf);

	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
					   true);
		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
		req0->index = cpu_to_le16(i);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"call hclge_cmd_send fail, ret = %d\n", ret);
			return;
		}

		if (!req0->resp_code)
			continue;

		memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
		snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
			 "%02u   |%02x:%02x:%02x:%02x:%02x:%02x|",
			 le16_to_cpu(req0->index),
			 req0->mac_addr[0], req0->mac_addr[1],
			 req0->mac_addr[2], req0->mac_addr[3],
			 req0->mac_addr[4], req0->mac_addr[5]);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x   |%04x |%x   |%04x|%x   |%02x   |%02x   |",
			 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
			 le16_to_cpu(req0->ethter_type),
			 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
			 le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG,
			 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
			 req0->i_port_bitmap, req0->i_port_direction);

		msg_egress_port = le16_to_cpu(req0->egress_port);
		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x     |%x    |%02x   |%04x|%x\n",
			 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
			 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
			 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
			 le16_to_cpu(req0->egress_queue),
			 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));

		dev_info(&hdev->pdev->dev, "%s", printf_buf);
	}
}

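/* hclge_dbg_fd_tcam_read: read and print one flow director TCAM key
 * @stage: flow director stage of the entry
 * @sel_x: true to read the x key, false for the y key
 * @loc: TCAM entry location
 */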
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
				  bool sel_x, u32 loc)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret, i;
	u32 *req;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage  = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	req1->index  = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		return ret;

	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
		 sel_x ? "x" : "y", loc);

	/* tcam_data0 ~ tcam_data1 */
	req = (u32 *)req1->tcam_data;
	for (i = 0; i < 2; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam_data2 ~ tcam_data7 */
	req = (u32 *)req2->tcam_data;
	for (i = 0; i < 6; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam_data8 ~ tcam_data12 */
	req = (u32 *)req3->tcam_data;
	for (i = 0; i < 5; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	return ret;
}

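/* hclge_dbg_get_rules_location: collect the location of every flow
 * director rule into @rule_locs
 * Return: rule count, or -EINVAL if the walked count disagrees with
 * hdev->hclge_fd_rule_num
 */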
static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int cnt = 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		rule_locs[cnt] = rule->location;
		cnt++;
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	if (cnt != hdev->hclge_fd_rule_num)
		return -EINVAL;

	return cnt;
}

static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
	int i, ret, rule_cnt;
	u16 *rule_locs;

	if (!hnae3_dev_fd_supported(hdev)) {
		dev_err(&hdev->pdev->dev,
			"Only FD-supported dev supports dump fd tcam\n");
		return;
	}

	if (!hdev->hclge_fd_rule_num ||
	    !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return;

	rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
			    sizeof(u16), GFP_KERNEL);
	if (!rule_locs)
		return;

	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
	if (rule_cnt <= 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get rule number, ret = %d\n", rule_cnt);
		kfree(rule_locs);
		return;
	}

	for (i = 0; i < rule_cnt; i++) {
		ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key x, ret = %d\n", ret);
			kfree(rule_locs);
			return;
		}

		ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key y, ret = %d\n", ret);
			kfree(rule_locs);
			return;
		}
	}

	kfree(rule_locs);
}

void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
		 hdev->rst_stats.pf_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
		 hdev->rst_stats.global_rst_cnt);
	dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
		 hdev->rst_stats.imp_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.reset_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_reset_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.reset_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.reset_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
	dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
	dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
		 hdev->last_serv_processed);
	dev_info(&hdev->pdev->dev, "serv_processed_cnt: %lu\n",
		 hdev->serv_processed_cnt);
}

static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi);
	dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi);
	dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used);
	dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left);
}

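/* hclge_dbg_get_m7_stats_info: query the firmware (M7) statistics BD
 * number, then read the statistics and print them six words per
 * descriptor
 */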
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
	struct hclge_desc *desc_src, *desc_tmp;
	struct hclge_get_m7_bd_cmd *req;
	struct hclge_desc desc;
	u32 bd_num, buf_len;
	int ret, i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);

	req = (struct hclge_get_m7_bd_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get firmware statistics bd number failed, ret = %d\n",
			ret);
		return;
	}

	bd_num = le32_to_cpu(req->bd_num);

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return;

	desc_tmp = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
				 HCLGE_OPC_M7_STATS_INFO);
	if (ret) {
		kfree(desc_src);
		dev_err(&hdev->pdev->dev,
			"get firmware statistics failed, ret = %d\n", ret);
		return;
	}

	for (i = 0; i < bd_num; i++) {
		dev_info(&hdev->pdev->dev, "0x%08x  0x%08x  0x%08x\n",
			 le32_to_cpu(desc_tmp->data[0]),
			 le32_to_cpu(desc_tmp->data[1]),
			 le32_to_cpu(desc_tmp->data[2]));
		dev_info(&hdev->pdev->dev, "0x%08x  0x%08x  0x%08x\n",
			 le32_to_cpu(desc_tmp->data[3]),
			 le32_to_cpu(desc_tmp->data[4]),
			 le32_to_cpu(desc_tmp->data[5]));

		desc_tmp++;
	}

	kfree(desc_src);
}

#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5

static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
					struct hclge_desc *desc, int *offset,
					int *length)
{
#define HCLGE_CMD_DATA_NUM		6

	int i;
	int j;

	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
			if (i == 0 && j == 0)
				continue;

			dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
				 *offset,
				 le32_to_cpu(desc[i].data[j]));
			*offset += sizeof(u32);
			*length -= sizeof(u32);
			if (*length <= 0)
				return;
		}
	}
}

/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
 * @hdev: pointer to struct hclge_dev
 * @cmd_buf: string that contains offset and length
 */
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
				      const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET	4096
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
#define HCLGE_NCL_CONFIG_PARAM_NUM	2

	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
	int offset;
	int length;
	int data0;
	int ret;

	ret = sscanf(cmd_buf, "%x %x", &offset, &length);
	if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
		dev_err(&hdev->pdev->dev,
			"Too few parameters, num = %d.\n", ret);
		return;
	}

	if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
	    length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
		dev_err(&hdev->pdev->dev,
			"Invalid input, offset = %d, length = %d.\n",
			offset, length);
		return;
	}

	dev_info(&hdev->pdev->dev, "offset |    data\n");

	while (length > 0) {
		data0 = offset;
		if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
		else
			data0 |= length << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);
		if (ret)
			return;

		hclge_ncl_config_data_print(hdev, desc, &offset, &length);
	}
}

static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
				    const char *cmd_buf)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	struct hclge_config_mac_mode_cmd *req_app;
	struct hclge_serdes_lb_cmd *req_serdes;
	struct hclge_desc desc;
	u8 loopback_en;
	int ret;

	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
	req_serdes = (struct hclge_serdes_lb_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump app loopback status, ret = %d\n", ret);
		return;
	}

	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
				    HCLGE_MAC_APP_LP_B);
	dev_info(&hdev->pdev->dev, "app loopback: %s\n",
		 loopback_en ? "on" : "off");

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump serdes loopback status, ret = %d\n",
			ret);
		return;
	}

	loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
		 loopback_en ? "on" : "off");

	loopback_en = req_serdes->enable &
			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
	dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
		 loopback_en ? "on" : "off");

	if (phydev)
		dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
			 phydev->loopback_enabled ? "on" : "off");
}

/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
 * @hdev: pointer to struct hclge_dev
 */
static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
{
#define HCLGE_BILLION_NANO_SECONDS 1000000000

	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;

	dev_info(&hdev->pdev->dev, "Recently generated mac tnl interrupts:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
		dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
			 (unsigned long)stats.time, rem_nsec / 1000,
			 stats.status);
	}
}

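/* hclge_dbg_dump_qs_shaper_single: read and print the shaping parameters
 * (ir_b/ir_u/ir_s, bs_b/bs_s, flag and rate) of one qset
 */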
static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	u8 ir_u, ir_b, ir_s, bs_b, bs_s;
	struct hclge_desc desc;
	u32 shapping_para;
	u32 rate;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);

	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qsid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"qs%u failed to get tx_rate, ret=%d\n",
			qsid, ret);
		return;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	ir_b = hclge_tm_get_field(shapping_para, IR_B);
	ir_u = hclge_tm_get_field(shapping_para, IR_U);
	ir_s = hclge_tm_get_field(shapping_para, IR_S);
	bs_b = hclge_tm_get_field(shapping_para, BS_B);
	bs_s = hclge_tm_get_field(shapping_para, BS_S);
	rate = le32_to_cpu(shap_cfg_cmd->qs_rate);

	dev_info(&hdev->pdev->dev,
		 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u, flag:%#x, rate:%u(Mbps)\n",
		 qsid, ir_b, ir_u, ir_s, bs_b, bs_s, shap_cfg_cmd->flag, rate);
}

static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
{
	struct hnae3_knic_private_info *kinfo;
	struct hclge_vport *vport;
	int vport_id, i;

	for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
		vport = &hdev->vport[vport_id];
		kinfo = &vport->nic.kinfo;

		dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);

		for (i = 0; i < kinfo->tc_info.num_tc; i++) {
			u16 qsid = vport->qs_offset + i;

			hclge_dbg_dump_qs_shaper_single(hdev, qsid);
		}
	}
}

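/* hclge_dbg_dump_qs_shaper: dump qset shaping parameters
 * @cmd_buf: optional qset id; if it does not parse, every vport's qsets
 *	     are dumped instead
 */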
static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
				     const char *cmd_buf)
{
#define HCLGE_MAX_QSET_NUM 1024

	u16 qsid;
	int ret;

	ret = kstrtou16(cmd_buf, 0, &qsid);
	if (ret) {
		hclge_dbg_dump_qs_shaper_all(hdev);
		return;
	}

	if (qsid >= HCLGE_MAX_QSET_NUM) {
		dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
			qsid);
		return;
	}

	hclge_dbg_dump_qs_shaper_single(hdev, qsid);
}

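/* hclge_dbg_dump_mac_list: print the unicast or multicast MAC list of one
 * function
 * @cmd_buf: function id (vport index); 0 is the PF, 1 and above are VFs
 */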
static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
				   bool is_unicast)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_vport *vport;
	struct list_head *list;
	u32 func_id;
	int ret;

	ret = kstrtouint(cmd_buf, 0, &func_id);
	if (ret < 0) {
		dev_err(&hdev->pdev->dev,
			"dump mac list: bad command string, ret = %d\n", ret);
		return -EINVAL;
	}

	if (func_id >= hdev->num_alloc_vport) {
		dev_err(&hdev->pdev->dev,
			"function id(%u) is out of range(0-%u)\n", func_id,
			hdev->num_alloc_vport - 1);
		return -EINVAL;
	}

	vport = &hdev->vport[func_id];

	list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;

	dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
		 func_id, is_unicast ? "uc" : "mc");
	dev_info(&hdev->pdev->dev, "mac address              state\n");

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		dev_info(&hdev->pdev->dev, "%pM         %d\n",
			 mac_node->mac_addr, mac_node->state);
	}

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}

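/* hclge_dbg_run_cmd: dispatch a debugfs command string to the matching
 * dump handler
 * Return: 0 on success, -EINVAL for an unknown command
 */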
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG	"dump reg"
#define DUMP_TM_MAP	"dump tm map"
#define DUMP_LOOPBACK	"dump loopback"
#define DUMP_INTERRUPT	"dump intr"

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
		hclge_dbg_fd_tcam(hdev);
	} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
		hclge_dbg_dump_tc(hdev);
	} else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
		hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
	} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
		hclge_dbg_dump_tm(hdev);
	} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
		hclge_dbg_dump_qos_pause_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
		hclge_dbg_dump_qos_pri_map(hdev);
	} else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
		hclge_dbg_dump_qos_buf_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
		hclge_dbg_dump_mng_table(hdev);
	} else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
		hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
	} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
		hclge_dbg_dump_rst_info(hdev);
	} else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
		hclge_dbg_dump_serv_info(hdev);
	} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
		hclge_dbg_get_m7_stats_info(hdev);
	} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
		hclge_dbg_dump_ncl_config(hdev,
					  &cmd_buf[sizeof("dump ncl_config")]);
	} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
		hclge_dbg_dump_mac_tnl_status(hdev);
	} else if (strncmp(cmd_buf, DUMP_LOOPBACK,
		   strlen(DUMP_LOOPBACK)) == 0) {
		hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]);
	} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
		hclge_dbg_dump_qs_shaper(hdev,
					 &cmd_buf[sizeof("dump qs shaper")]);
	} else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
		hclge_dbg_dump_mac_list(hdev,
					&cmd_buf[sizeof("dump uc mac list")],
					true);
	} else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
		hclge_dbg_dump_mac_list(hdev,
					&cmd_buf[sizeof("dump mc mac list")],
					false);
	} else if (strncmp(cmd_buf, DUMP_INTERRUPT,
		   strlen(DUMP_INTERRUPT)) == 0) {
		hclge_dbg_dump_interrupt(hdev);
	} else {
		dev_info(&hdev->pdev->dev, "unknown command\n");
		return -EINVAL;
	}

	return 0;
}