1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3 
4 #include <linux/device.h>
5 
6 #include "hclge_debugfs.h"
7 #include "hclge_err.h"
8 #include "hclge_main.h"
9 #include "hclge_tm.h"
10 #include "hnae3.h"
11 
12 static const char * const state_str[] = { "off", "on" };
13 static const char * const hclge_mac_state_str[] = {
14 	"TO_ADD", "TO_DEL", "ACTIVE"
15 };
16 
17 static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
18 
19 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
20 	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
21 	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
22 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
23 		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
24 		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
25 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
26 	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
27 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
28 		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
29 		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
30 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
31 	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
32 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
33 		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
34 		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
35 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
36 	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
37 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
38 		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
39 		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
40 	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
41 	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
42 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
43 		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
44 		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
45 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
46 	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
47 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
48 		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
49 		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
50 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
51 	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
52 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
53 		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
54 		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
55 	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
56 	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
57 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
58 		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
59 		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
60 	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
61 	  .dfx_msg = &hclge_dbg_rtc_reg[0],
62 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
63 		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
64 		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
65 	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
66 	  .dfx_msg = &hclge_dbg_ppp_reg[0],
67 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
68 		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
69 		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
70 	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
71 	  .dfx_msg = &hclge_dbg_rcb_reg[0],
72 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
73 		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
74 		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
75 	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
76 	  .dfx_msg = &hclge_dbg_tqp_reg[0],
77 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
78 		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
79 		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
80 };
81 
/* The caller must make sure that len(name) + interval >= maxlen(item data) + 2.
 * For example, if name = "pkt_num" (len: 7) and the item data is a u32 printed
 * as "%u" (maxlen: 10), the interval should be at least 5.
 */
86 static void hclge_dbg_fill_content(char *content, u16 len,
87 				   const struct hclge_dbg_item *items,
88 				   const char **result, u16 size)
89 {
90 	char *pos = content;
91 	u16 i;
92 
93 	memset(content, ' ', len);
94 	for (i = 0; i < size; i++) {
		if (result)
			memcpy(pos, result[i], strlen(result[i]));
		else
			memcpy(pos, items[i].name, strlen(items[i].name));
99 		pos += strlen(items[i].name) + items[i].interval;
100 	}
101 	*pos++ = '\n';
102 	*pos++ = '\0';
103 }
104 
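/* Format a function id for display: "pf" for id 0, otherwise "vf<id - 1>". */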
105 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
106 {
107 	if (id)
108 		sprintf(buf, "vf%u", id - 1U);
109 	else
110 		sprintf(buf, "pf");
111 
112 	return buf;
113 }
114 
115 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
116 				    u32 *bd_num)
117 {
118 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
119 	int entries_per_desc;
120 	int index;
121 	int ret;
122 
123 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
124 	if (ret) {
125 		dev_err(&hdev->pdev->dev,
126 			"failed to get dfx bd_num, offset = %d, ret = %d\n",
127 			offset, ret);
128 		return ret;
129 	}
130 
131 	entries_per_desc = ARRAY_SIZE(desc[0].data);
132 	index = offset % entries_per_desc;
133 
134 	*bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
135 	if (!(*bd_num)) {
136 		dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
137 		return -EINVAL;
138 	}
139 
140 	return 0;
141 }
142 
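/* Build and send a (possibly multi-BD) read command: BD0 carries the query
 * index, and every BD except the last one is chained with the NEXT flag.
 */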
143 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
144 			      struct hclge_desc *desc_src,
145 			      int index, int bd_num,
146 			      enum hclge_opcode_type cmd)
147 {
148 	struct hclge_desc *desc = desc_src;
149 	int ret, i;
150 
151 	hclge_cmd_setup_basic_desc(desc, cmd, true);
152 	desc->data[0] = cpu_to_le32(index);
153 
154 	for (i = 1; i < bd_num; i++) {
155 		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
156 		desc++;
157 		hclge_cmd_setup_basic_desc(desc, cmd, true);
158 	}
159 
160 	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
161 	if (ret)
162 		dev_err(&hdev->pdev->dev,
163 			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
164 	return ret;
165 }
166 
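/* Dump the per-TQP DFX registers: print the item legend and column headers,
 * then one row of register values for each TQP of the PF.
 */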
167 static int
168 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
169 		       const struct hclge_dbg_reg_type_info *reg_info,
170 		       char *buf, int len, int *pos)
171 {
172 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
173 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
174 	struct hclge_desc *desc_src;
	u32 index, entry, i, cnt, bd_num;
	int min_num, ret;
177 	struct hclge_desc *desc;
178 
179 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
180 	if (ret)
181 		return ret;
182 
183 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
184 	if (!desc_src)
185 		return -ENOMEM;
186 
187 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
188 
189 	for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
190 		*pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
191 				  cnt++, dfx_message->message);
192 
193 	for (i = 0; i < cnt; i++)
194 		*pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
195 
196 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
197 
198 	for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
199 		dfx_message = reg_info->dfx_msg;
200 		desc = desc_src;
201 		ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
202 					 reg_msg->cmd);
203 		if (ret)
204 			break;
205 
206 		for (i = 0; i < min_num; i++, dfx_message++) {
207 			entry = i % HCLGE_DESC_DATA_LEN;
208 			if (i > 0 && !entry)
209 				desc++;
210 
211 			*pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
212 					  le32_to_cpu(desc->data[entry]));
213 		}
214 		*pos += scnprintf(buf + *pos, len - *pos, "\n");
215 	}
216 
217 	kfree(desc_src);
218 	return ret;
219 }
220 
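/* Dump a common (non-TQP) DFX register set as "name: value" lines, skipping
 * items whose flag is not set.
 */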
221 static int
222 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
223 			  const struct hclge_dbg_reg_type_info *reg_info,
224 			  char *buf, int len, int *pos)
225 {
226 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
227 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
228 	struct hclge_desc *desc_src;
	struct hclge_desc *desc;
	u32 entry, i, bd_num;
	int min_num, ret;
232 
233 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
234 	if (ret)
235 		return ret;
236 
237 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
238 	if (!desc_src)
239 		return -ENOMEM;
240 
241 	desc = desc_src;
242 
243 	ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
244 	if (ret) {
		kfree(desc_src);
246 		return ret;
247 	}
248 
249 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
250 
251 	for (i = 0; i < min_num; i++, dfx_message++) {
252 		entry = i % HCLGE_DESC_DATA_LEN;
253 		if (i > 0 && !entry)
254 			desc++;
255 		if (!dfx_message->flag)
256 			continue;
257 
258 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
259 				  dfx_message->message,
260 				  le32_to_cpu(desc->data[entry]));
261 	}
262 
263 	kfree(desc_src);
264 	return 0;
265 }
266 
267 static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
268 	{HCLGE_MAC_TX_EN_B,  "mac_trans_en"},
269 	{HCLGE_MAC_RX_EN_B,  "mac_rcv_en"},
270 	{HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
271 	{HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
272 	{HCLGE_MAC_1588_TX_B, "1588_trans_en"},
273 	{HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
274 	{HCLGE_MAC_APP_LP_B,  "mac_app_loop_en"},
275 	{HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
276 	{HCLGE_MAC_FCS_TX_B,  "mac_fcs_tx_en"},
277 	{HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
278 	{HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
279 	{HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
280 	{HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
281 	{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
282 };
283 
static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
					    int len, int *pos)
286 {
287 	struct hclge_config_mac_mode_cmd *req;
288 	struct hclge_desc desc;
289 	u32 loop_en, i, offset;
290 	int ret;
291 
292 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
293 
294 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
295 	if (ret) {
296 		dev_err(&hdev->pdev->dev,
297 			"failed to dump mac enable status, ret = %d\n", ret);
298 		return ret;
299 	}
300 
301 	req = (struct hclge_config_mac_mode_cmd *)desc.data;
302 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
303 
304 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
305 		offset = hclge_dbg_mac_en_status[i].offset;
306 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
307 				  hclge_dbg_mac_en_status[i].message,
308 				  hnae3_get_bit(loop_en, offset));
309 	}
310 
311 	return 0;
312 }
313 
314 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
315 					 int len, int *pos)
316 {
317 	struct hclge_config_max_frm_size_cmd *req;
318 	struct hclge_desc desc;
319 	int ret;
320 
321 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
322 
323 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
324 	if (ret) {
325 		dev_err(&hdev->pdev->dev,
326 			"failed to dump mac frame size, ret = %d\n", ret);
327 		return ret;
328 	}
329 
330 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
331 
332 	*pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
333 			  le16_to_cpu(req->max_frm_size));
334 	*pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
335 			  req->min_frm_size);
336 
337 	return 0;
338 }
339 
340 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
341 					   int len, int *pos)
342 {
343 #define HCLGE_MAC_SPEED_SHIFT	0
344 #define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
345 #define HCLGE_MAC_DUPLEX_SHIFT	7
346 
347 	struct hclge_config_mac_speed_dup_cmd *req;
348 	struct hclge_desc desc;
349 	int ret;
350 
351 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
352 
353 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
354 	if (ret) {
355 		dev_err(&hdev->pdev->dev,
356 			"failed to dump mac speed duplex, ret = %d\n", ret);
357 		return ret;
358 	}
359 
360 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
361 
362 	*pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
363 			  hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
364 					  HCLGE_MAC_SPEED_SHIFT));
365 	*pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
366 			  hnae3_get_bit(req->speed_dup,
367 					HCLGE_MAC_DUPLEX_SHIFT));
368 	return 0;
369 }
370 
371 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
372 {
373 	int pos = 0;
374 	int ret;
375 
376 	ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
377 	if (ret)
378 		return ret;
379 
380 	ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
381 	if (ret)
382 		return ret;
383 
384 	return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
385 }
386 
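/* Dump the per-qset DFX status bits returned by HCLGE_OPC_QSET_DFX_STS. */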
387 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
388 				   int *pos)
389 {
390 	struct hclge_dbg_bitmap_cmd req;
391 	struct hclge_desc desc;
392 	u16 qset_id, qset_num;
393 	int ret;
394 
395 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
396 	if (ret)
397 		return ret;
398 
399 	*pos += scnprintf(buf + *pos, len - *pos,
400 			  "qset_id  roce_qset_mask  nic_qset_mask  qset_shaping_pass  qset_bp_status\n");
401 	for (qset_id = 0; qset_id < qset_num; qset_id++) {
402 		ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
403 					 HCLGE_OPC_QSET_DFX_STS);
404 		if (ret)
405 			return ret;
406 
407 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
408 
409 		*pos += scnprintf(buf + *pos, len - *pos,
410 				  "%04u           %#x            %#x             %#x               %#x\n",
411 				  qset_id, req.bit0, req.bit1, req.bit2,
412 				  req.bit3);
413 	}
414 
415 	return 0;
416 }
417 
418 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
419 				  int *pos)
420 {
421 	struct hclge_dbg_bitmap_cmd req;
422 	struct hclge_desc desc;
423 	u8 pri_id, pri_num;
424 	int ret;
425 
426 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
427 	if (ret)
428 		return ret;
429 
430 	*pos += scnprintf(buf + *pos, len - *pos,
431 			  "pri_id  pri_mask  pri_cshaping_pass  pri_pshaping_pass\n");
432 	for (pri_id = 0; pri_id < pri_num; pri_id++) {
433 		ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
434 					 HCLGE_OPC_PRI_DFX_STS);
435 		if (ret)
436 			return ret;
437 
438 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
439 
440 		*pos += scnprintf(buf + *pos, len - *pos,
441 				  "%03u       %#x           %#x                %#x\n",
442 				  pri_id, req.bit0, req.bit1, req.bit2);
443 	}
444 
445 	return 0;
446 }
447 
448 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
449 				 int *pos)
450 {
451 	struct hclge_dbg_bitmap_cmd req;
452 	struct hclge_desc desc;
453 	u8 pg_id;
454 	int ret;
455 
456 	*pos += scnprintf(buf + *pos, len - *pos,
457 			  "pg_id  pg_mask  pg_cshaping_pass  pg_pshaping_pass\n");
458 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
459 		ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
460 					 HCLGE_OPC_PG_DFX_STS);
461 		if (ret)
462 			return ret;
463 
464 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
465 
466 		*pos += scnprintf(buf + *pos, len - *pos,
467 				  "%03u      %#x           %#x               %#x\n",
468 				  pg_id, req.bit0, req.bit1, req.bit2);
469 	}
470 
471 	return 0;
472 }
473 
474 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
475 				    int *pos)
476 {
477 	struct hclge_desc desc;
478 	u16 nq_id;
479 	int ret;
480 
481 	*pos += scnprintf(buf + *pos, len - *pos,
482 			  "nq_id  sch_nic_queue_cnt  sch_roce_queue_cnt\n");
483 	for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
484 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
485 					 HCLGE_OPC_SCH_NQ_CNT);
486 		if (ret)
487 			return ret;
488 
489 		*pos += scnprintf(buf + *pos, len - *pos, "%04u           %#x",
490 				  nq_id, le32_to_cpu(desc.data[1]));
491 
492 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
493 					 HCLGE_OPC_SCH_RQ_CNT);
494 		if (ret)
495 			return ret;
496 
497 		*pos += scnprintf(buf + *pos, len - *pos,
498 				  "               %#x\n",
499 				  le32_to_cpu(desc.data[1]));
500 	}
501 
502 	return 0;
503 }
504 
505 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
506 				   int *pos)
507 {
508 	struct hclge_dbg_bitmap_cmd req;
509 	struct hclge_desc desc;
510 	u8 port_id = 0;
511 	int ret;
512 
513 	ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
514 				 HCLGE_OPC_PORT_DFX_STS);
515 	if (ret)
516 		return ret;
517 
518 	req.bitmap = (u8)le32_to_cpu(desc.data[1]);
519 
520 	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
521 			 req.bit0);
522 	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
523 			 req.bit1);
524 
525 	return 0;
526 }
527 
528 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
529 				 int *pos)
530 {
531 	struct hclge_desc desc[2];
532 	u8 port_id = 0;
533 	int ret;
534 
535 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
536 				 HCLGE_OPC_TM_INTERNAL_CNT);
537 	if (ret)
538 		return ret;
539 
540 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
541 			  le32_to_cpu(desc[0].data[1]));
542 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
543 			  le32_to_cpu(desc[0].data[2]));
544 
545 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
546 				 HCLGE_OPC_TM_INTERNAL_STS);
547 	if (ret)
548 		return ret;
549 
550 	*pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
551 			  le32_to_cpu(desc[0].data[1]));
552 	*pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
553 			  le32_to_cpu(desc[0].data[2]));
554 	*pos += scnprintf(buf + *pos, len - *pos,
555 			  "sch_roce_fifo_afull_gap: %#x\n",
556 			  le32_to_cpu(desc[0].data[3]));
557 	*pos += scnprintf(buf + *pos, len - *pos,
558 			  "tx_private_waterline: %#x\n",
559 			  le32_to_cpu(desc[0].data[4]));
560 	*pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
561 			  le32_to_cpu(desc[0].data[5]));
562 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
563 			  le32_to_cpu(desc[1].data[0]));
564 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
565 			  le32_to_cpu(desc[1].data[1]));
566 
567 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
568 		return 0;
569 
570 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
571 				 HCLGE_OPC_TM_INTERNAL_STS_1);
572 	if (ret)
573 		return ret;
574 
575 	*pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
576 			  le32_to_cpu(desc[0].data[1]));
577 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
578 			  le32_to_cpu(desc[0].data[2]));
579 	*pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
580 			  le32_to_cpu(desc[0].data[3]));
581 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
582 			  le32_to_cpu(desc[0].data[4]));
583 	*pos += scnprintf(buf + *pos, len - *pos,
584 			  "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
585 			  le32_to_cpu(desc[0].data[5]));
586 
587 	return 0;
588 }
589 
590 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
591 {
592 	int pos = 0;
593 	int ret;
594 
595 	ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
596 	if (ret)
597 		return ret;
598 
599 	ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
600 	if (ret)
601 		return ret;
602 
603 	ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
604 	if (ret)
605 		return ret;
606 
607 	ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
608 	if (ret)
609 		return ret;
610 
611 	ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
612 	if (ret)
613 		return ret;
614 
615 	return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
616 }
617 
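/* Dispatch a "reg" debugfs command to every matching entry in
 * hclge_dbg_reg_info[]; TQP registers use their own dump format.
 */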
618 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
619 				  enum hnae3_dbg_cmd cmd, char *buf, int len)
620 {
621 	const struct hclge_dbg_reg_type_info *reg_info;
622 	int pos = 0, ret = 0;
623 	int i;
624 
625 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
626 		reg_info = &hclge_dbg_reg_info[i];
627 		if (cmd == reg_info->cmd) {
628 			if (cmd == HNAE3_DBG_CMD_REG_TQP)
629 				return hclge_dbg_dump_reg_tqp(hdev, reg_info,
630 							      buf, len, &pos);
631 
632 			ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
633 							len, &pos);
634 			if (ret)
635 				break;
636 		}
637 	}
638 
639 	return ret;
640 }
641 
642 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
643 {
644 	struct hclge_ets_tc_weight_cmd *ets_weight;
645 	struct hclge_desc desc;
646 	char *sch_mode_str;
647 	int pos = 0;
648 	int ret;
649 	u8 i;
650 
651 	if (!hnae3_dev_dcb_supported(hdev)) {
652 		dev_err(&hdev->pdev->dev,
653 			"Only DCB-supported dev supports tc\n");
654 		return -EOPNOTSUPP;
655 	}
656 
657 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
658 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
659 	if (ret) {
660 		dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
661 			ret);
662 		return ret;
663 	}
664 
665 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
666 
667 	pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
668 			 hdev->tm_info.num_tc);
669 	pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
670 			 ets_weight->weight_offset);
671 
672 	pos += scnprintf(buf + pos, len - pos, "TC    MODE  WEIGHT\n");
673 	for (i = 0; i < HNAE3_MAX_TC; i++) {
674 		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
675 		pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
676 				 i, sch_mode_str,
677 				 hdev->tm_info.pg_info[0].tc_dwrr[i]);
678 	}
679 
680 	return 0;
681 }
682 
683 static const struct hclge_dbg_item tm_pg_items[] = {
684 	{ "ID", 2 },
685 	{ "PRI_MAP", 2 },
686 	{ "MODE", 2 },
687 	{ "DWRR", 2 },
688 	{ "C_IR_B", 2 },
689 	{ "C_IR_U", 2 },
690 	{ "C_IR_S", 2 },
691 	{ "C_BS_B", 2 },
692 	{ "C_BS_S", 2 },
693 	{ "C_FLAG", 2 },
694 	{ "C_RATE(Mbps)", 2 },
695 	{ "P_IR_B", 2 },
696 	{ "P_IR_U", 2 },
697 	{ "P_IR_S", 2 },
698 	{ "P_BS_B", 2 },
699 	{ "P_BS_S", 2 },
700 	{ "P_FLAG", 2 },
701 	{ "P_RATE(Mbps)", 0 }
702 };
703 
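/* Fill the shaper parameter columns (IR_B/IR_U/IR_S, BS_B/BS_S, FLAG, RATE)
 * into result[], starting at *index.
 */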
704 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
705 					  char **result, u8 *index)
706 {
707 	sprintf(result[(*index)++], "%3u", para->ir_b);
708 	sprintf(result[(*index)++], "%3u", para->ir_u);
709 	sprintf(result[(*index)++], "%3u", para->ir_s);
710 	sprintf(result[(*index)++], "%3u", para->bs_b);
711 	sprintf(result[(*index)++], "%3u", para->bs_s);
712 	sprintf(result[(*index)++], "%3u", para->flag);
713 	sprintf(result[(*index)++], "%6u", para->rate);
714 }
715 
716 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
717 				  char *buf, int len)
718 {
719 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
720 	char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
721 	u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
722 	char content[HCLGE_DBG_TM_INFO_LEN];
723 	int pos = 0;
724 	int ret;
725 
726 	for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
727 		result[i] = data_str;
728 		data_str += HCLGE_DBG_DATA_STR_LEN;
729 	}
730 
731 	hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
732 			       NULL, ARRAY_SIZE(tm_pg_items));
733 	pos += scnprintf(buf + pos, len - pos, "%s", content);
734 
735 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
736 		ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
737 		if (ret)
738 			return ret;
739 
740 		ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
741 		if (ret)
742 			return ret;
743 
744 		ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
745 		if (ret)
746 			return ret;
747 
748 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
749 					     HCLGE_OPC_TM_PG_C_SHAPPING,
750 					     &c_shaper_para);
751 		if (ret)
752 			return ret;
753 
754 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
755 					     HCLGE_OPC_TM_PG_P_SHAPPING,
756 					     &p_shaper_para);
757 		if (ret)
758 			return ret;
759 
760 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
761 				       "sp";
762 
763 		j = 0;
764 		sprintf(result[j++], "%02u", pg_id);
765 		sprintf(result[j++], "0x%02x", pri_bit_map);
766 		sprintf(result[j++], "%4s", sch_mode_str);
767 		sprintf(result[j++], "%3u", weight);
768 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
769 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
770 
771 		hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
772 				       (const char **)result,
773 				       ARRAY_SIZE(tm_pg_items));
774 		pos += scnprintf(buf + pos, len - pos, "%s", content);
775 	}
776 
777 	return 0;
778 }
779 
780 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
781 {
782 	char *data_str;
783 	int ret;
784 
785 	data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
786 			   HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
787 	if (!data_str)
788 		return -ENOMEM;
789 
790 	ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
791 
792 	kfree(data_str);
793 
794 	return ret;
795 }
796 
static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
798 {
799 	struct hclge_tm_shaper_para shaper_para;
800 	int pos = 0;
801 	int ret;
802 
803 	ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
804 	if (ret)
805 		return ret;
806 
807 	pos += scnprintf(buf + pos, len - pos,
808 			 "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
809 	pos += scnprintf(buf + pos, len - pos,
810 			 "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
811 			 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
812 			 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
813 			 shaper_para.rate);
814 
815 	return 0;
816 }
817 
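/* Dump the back-pressure to qset bitmaps of one TC; returns the number of
 * bytes written to buf, or a negative error code.
 */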
818 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
819 					 char *buf, int len)
820 {
821 	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
822 	struct hclge_bp_to_qs_map_cmd *map;
823 	struct hclge_desc desc;
824 	int pos = 0;
825 	u8 group_id;
826 	u8 grp_num;
827 	u16 i = 0;
828 	int ret;
829 
830 	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
831 		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
832 	map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
833 	for (group_id = 0; group_id < grp_num; group_id++) {
834 		hclge_cmd_setup_basic_desc(&desc,
835 					   HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
836 					   true);
837 		map->tc_id = tc_id;
838 		map->qs_group_id = group_id;
839 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
840 		if (ret) {
841 			dev_err(&hdev->pdev->dev,
842 				"failed to get bp to qset map, ret = %d\n",
843 				ret);
844 			return ret;
845 		}
846 
847 		qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
848 	}
849 
850 	pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
851 	for (group_id = 0; group_id < grp_num / 8; group_id++) {
852 		pos += scnprintf(buf + pos, len - pos,
853 			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
854 			 group_id * 256, qset_mapping[i + 7],
855 			 qset_mapping[i + 6], qset_mapping[i + 5],
856 			 qset_mapping[i + 4], qset_mapping[i + 3],
857 			 qset_mapping[i + 2], qset_mapping[i + 1],
858 			 qset_mapping[i]);
859 		i += 8;
860 	}
861 
862 	return pos;
863 }
864 
865 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
866 {
867 	u16 queue_id;
868 	u16 qset_id;
869 	u8 link_vld;
870 	int pos = 0;
871 	u8 pri_id;
872 	u8 tc_id;
873 	int ret;
874 
875 	for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
876 		ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
877 		if (ret)
878 			return ret;
879 
880 		ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
881 						&link_vld);
882 		if (ret)
883 			return ret;
884 
885 		ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
886 		if (ret)
887 			return ret;
888 
889 		pos += scnprintf(buf + pos, len - pos,
890 				 "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
891 		pos += scnprintf(buf + pos, len - pos,
892 				 "%04u        %4u       %3u      %2u\n",
893 				 queue_id, qset_id, pri_id, tc_id);
894 
895 		if (!hnae3_dev_dcb_supported(hdev))
896 			continue;
897 
898 		ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
899 						    len - pos);
900 		if (ret < 0)
901 			return ret;
902 		pos += ret;
903 
904 		pos += scnprintf(buf + pos, len - pos, "\n");
905 	}
906 
907 	return 0;
908 }
909 
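/* Dump the base id and maximum number of each TM node level
 * (PG/PRI/QSET/QUEUE).
 */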
910 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
911 {
912 	struct hclge_tm_nodes_cmd *nodes;
913 	struct hclge_desc desc;
914 	int pos = 0;
915 	int ret;
916 
917 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
918 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
919 	if (ret) {
920 		dev_err(&hdev->pdev->dev,
921 			"failed to dump tm nodes, ret = %d\n", ret);
922 		return ret;
923 	}
924 
925 	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
926 
927 	pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
928 	pos += scnprintf(buf + pos, len - pos, "PG      %4u      %4u\n",
929 			 nodes->pg_base_id, nodes->pg_num);
930 	pos += scnprintf(buf + pos, len - pos, "PRI     %4u      %4u\n",
931 			 nodes->pri_base_id, nodes->pri_num);
932 	pos += scnprintf(buf + pos, len - pos, "QSET    %4u      %4u\n",
933 			 le16_to_cpu(nodes->qset_base_id),
934 			 le16_to_cpu(nodes->qset_num));
935 	pos += scnprintf(buf + pos, len - pos, "QUEUE   %4u      %4u\n",
936 			 le16_to_cpu(nodes->queue_base_id),
937 			 le16_to_cpu(nodes->queue_num));
938 
939 	return 0;
940 }
941 
942 static const struct hclge_dbg_item tm_pri_items[] = {
943 	{ "ID", 4 },
944 	{ "MODE", 2 },
945 	{ "DWRR", 2 },
946 	{ "C_IR_B", 2 },
947 	{ "C_IR_U", 2 },
948 	{ "C_IR_S", 2 },
949 	{ "C_BS_B", 2 },
950 	{ "C_BS_S", 2 },
951 	{ "C_FLAG", 2 },
952 	{ "C_RATE(Mbps)", 2 },
953 	{ "P_IR_B", 2 },
954 	{ "P_IR_U", 2 },
955 	{ "P_IR_S", 2 },
956 	{ "P_BS_B", 2 },
957 	{ "P_BS_S", 2 },
958 	{ "P_FLAG", 2 },
959 	{ "P_RATE(Mbps)", 0 }
960 };
961 
962 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
963 {
964 	char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
965 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
966 	char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
967 	char content[HCLGE_DBG_TM_INFO_LEN];
968 	u8 pri_num, sch_mode, weight, i, j;
969 	int pos, ret;
970 
971 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
972 	if (ret)
973 		return ret;
974 
975 	for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
976 		result[i] = &data_str[i][0];
977 
978 	hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
979 			       NULL, ARRAY_SIZE(tm_pri_items));
980 	pos = scnprintf(buf, len, "%s", content);
981 
982 	for (i = 0; i < pri_num; i++) {
983 		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
984 		if (ret)
985 			return ret;
986 
987 		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
988 		if (ret)
989 			return ret;
990 
991 		ret = hclge_tm_get_pri_shaper(hdev, i,
992 					      HCLGE_OPC_TM_PRI_C_SHAPPING,
993 					      &c_shaper_para);
994 		if (ret)
995 			return ret;
996 
997 		ret = hclge_tm_get_pri_shaper(hdev, i,
998 					      HCLGE_OPC_TM_PRI_P_SHAPPING,
999 					      &p_shaper_para);
1000 		if (ret)
1001 			return ret;
1002 
1003 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1004 			       "sp";
1005 
1006 		j = 0;
1007 		sprintf(result[j++], "%04u", i);
1008 		sprintf(result[j++], "%4s", sch_mode_str);
1009 		sprintf(result[j++], "%3u", weight);
1010 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1011 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1012 		hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1013 				       (const char **)result,
1014 				       ARRAY_SIZE(tm_pri_items));
1015 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1016 	}
1017 
1018 	return 0;
1019 }
1020 
1021 static const struct hclge_dbg_item tm_qset_items[] = {
1022 	{ "ID", 4 },
1023 	{ "MAP_PRI", 2 },
1024 	{ "LINK_VLD", 2 },
1025 	{ "MODE", 2 },
1026 	{ "DWRR", 2 },
1027 	{ "IR_B", 2 },
1028 	{ "IR_U", 2 },
1029 	{ "IR_S", 2 },
1030 	{ "BS_B", 2 },
1031 	{ "BS_S", 2 },
1032 	{ "FLAG", 2 },
1033 	{ "RATE(Mbps)", 0 }
1034 };
1035 
1036 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1037 {
1038 	char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1039 	char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1040 	u8 priority, link_vld, sch_mode, weight;
1041 	struct hclge_tm_shaper_para shaper_para;
1042 	char content[HCLGE_DBG_TM_INFO_LEN];
1043 	u16 qset_num, i;
1044 	int ret, pos;
1045 	u8 j;
1046 
1047 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
1048 	if (ret)
1049 		return ret;
1050 
1051 	for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1052 		result[i] = &data_str[i][0];
1053 
1054 	hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1055 			       NULL, ARRAY_SIZE(tm_qset_items));
1056 	pos = scnprintf(buf, len, "%s", content);
1057 
1058 	for (i = 0; i < qset_num; i++) {
1059 		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1060 		if (ret)
1061 			return ret;
1062 
1063 		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1064 		if (ret)
1065 			return ret;
1066 
1067 		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1068 		if (ret)
1069 			return ret;
1070 
1071 		ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1072 		if (ret)
1073 			return ret;
1074 
1075 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1076 			       "sp";
1077 
1078 		j = 0;
1079 		sprintf(result[j++], "%04u", i);
1080 		sprintf(result[j++], "%4u", priority);
1081 		sprintf(result[j++], "%4u", link_vld);
1082 		sprintf(result[j++], "%4s", sch_mode_str);
1083 		sprintf(result[j++], "%3u", weight);
1084 		hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1085 
1086 		hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1087 				       (const char **)result,
1088 				       ARRAY_SIZE(tm_qset_items));
1089 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1090 	}
1091 
1092 	return 0;
1093 }
1094 
1095 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1096 					int len)
1097 {
1098 	struct hclge_cfg_pause_param_cmd *pause_param;
1099 	struct hclge_desc desc;
1100 	int pos = 0;
1101 	int ret;
1102 
1103 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1104 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1105 	if (ret) {
1106 		dev_err(&hdev->pdev->dev,
1107 			"failed to dump qos pause, ret = %d\n", ret);
1108 		return ret;
1109 	}
1110 
1111 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1112 
1113 	pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1114 			 pause_param->pause_trans_gap);
1115 	pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1116 			 le16_to_cpu(pause_param->pause_trans_time));
1117 	return 0;
1118 }
1119 
1120 #define HCLGE_DBG_TC_MASK		0x0F
1121 
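/* Dump the priority to TC mapping; each nibble of the mapping data holds the
 * TC of one priority.
 */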
1122 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1123 				      int len)
1124 {
1125 #define HCLGE_DBG_TC_BIT_WIDTH		4
1126 
1127 	struct hclge_qos_pri_map_cmd *pri_map;
1128 	struct hclge_desc desc;
1129 	int pos = 0;
1130 	u8 *pri_tc;
1131 	u8 tc, i;
1132 	int ret;
1133 
1134 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1135 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1136 	if (ret) {
1137 		dev_err(&hdev->pdev->dev,
1138 			"failed to dump qos pri map, ret = %d\n", ret);
1139 		return ret;
1140 	}
1141 
1142 	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1143 
1144 	pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1145 			 pri_map->vlan_pri);
1146 	pos += scnprintf(buf + pos, len - pos, "PRI  TC\n");
1147 
1148 	pri_tc = (u8 *)pri_map;
1149 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1150 		tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1151 		tc &= HCLGE_DBG_TC_MASK;
1152 		pos += scnprintf(buf + pos, len - pos, "%u     %u\n", i, tc);
1153 	}
1154 
1155 	return 0;
1156 }
1157 
1158 static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
1159 				       int len)
1160 {
1161 	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
1162 	u8 *req0 = (u8 *)desc[0].data;
1163 	u8 *req1 = (u8 *)desc[1].data;
1164 	u8 dscp_tc[HCLGE_MAX_DSCP];
1165 	int pos, ret;
1166 	u8 i, j;
1167 
1168 	pos = scnprintf(buf, len, "tc map mode: %s\n",
1169 			tc_map_mode_str[hdev->vport[0].nic.kinfo.tc_map_mode]);
1170 
1171 	if (hdev->vport[0].nic.kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
1172 		return 0;
1173 
1174 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
1175 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1176 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
1177 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
1178 	if (ret) {
1179 		dev_err(&hdev->pdev->dev,
1180 			"failed to dump qos dscp map, ret = %d\n", ret);
1181 		return ret;
1182 	}
1183 
1184 	pos += scnprintf(buf + pos, len - pos, "\nDSCP  PRIO  TC\n");
1185 
	/* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
1187 	for (i = 0; i < HCLGE_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
1188 		j = i + HCLGE_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
		/* Each dscp setting has 4 bits, so each byte holds two dscp
		 * settings.
		 */
1192 		dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1193 		dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1194 		dscp_tc[i] &= HCLGE_DBG_TC_MASK;
1195 		dscp_tc[j] &= HCLGE_DBG_TC_MASK;
1196 	}
1197 
1198 	for (i = 0; i < HCLGE_MAX_DSCP; i++) {
1199 		if (hdev->tm_info.dscp_prio[i] == HCLGE_PRIO_ID_INVALID)
1200 			continue;
1201 
1202 		pos += scnprintf(buf + pos, len - pos, " %2u    %u    %u\n",
1203 				 i, hdev->tm_info.dscp_prio[i], dscp_tc[i]);
1204 	}
1205 
1206 	return 0;
1207 }
1208 
1209 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1210 {
1211 	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1212 	struct hclge_desc desc;
1213 	int pos = 0;
1214 	int i, ret;
1215 
1216 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1217 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1218 	if (ret) {
1219 		dev_err(&hdev->pdev->dev,
1220 			"failed to dump tx buf, ret = %d\n", ret);
1221 		return ret;
1222 	}
1223 
1224 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1225 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1226 		pos += scnprintf(buf + pos, len - pos,
1227 				 "tx_packet_buf_tc_%d: 0x%x\n", i,
1228 				 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1229 
1230 	return pos;
1231 }
1232 
1233 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1234 					  int len)
1235 {
1236 	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1237 	struct hclge_desc desc;
1238 	int pos = 0;
1239 	int i, ret;
1240 
1241 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1242 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1243 	if (ret) {
1244 		dev_err(&hdev->pdev->dev,
1245 			"failed to dump rx priv buf, ret = %d\n", ret);
1246 		return ret;
1247 	}
1248 
1249 	pos += scnprintf(buf + pos, len - pos, "\n");
1250 
1251 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1252 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1253 		pos += scnprintf(buf + pos, len - pos,
1254 				 "rx_packet_buf_tc_%d: 0x%x\n", i,
1255 				 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1256 
1257 	pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1258 			 le16_to_cpu(rx_buf_cmd->shared_buf));
1259 
1260 	return pos;
1261 }
1262 
1263 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1264 					   int len)
1265 {
1266 	struct hclge_rx_com_wl *rx_com_wl;
1267 	struct hclge_desc desc;
1268 	int pos = 0;
1269 	int ret;
1270 
1271 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1272 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1273 	if (ret) {
1274 		dev_err(&hdev->pdev->dev,
1275 			"failed to dump rx common wl, ret = %d\n", ret);
1276 		return ret;
1277 	}
1278 
1279 	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1280 	pos += scnprintf(buf + pos, len - pos, "\n");
1281 	pos += scnprintf(buf + pos, len - pos,
1282 			 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1283 			 le16_to_cpu(rx_com_wl->com_wl.high),
1284 			 le16_to_cpu(rx_com_wl->com_wl.low));
1285 
1286 	return pos;
1287 }
1288 
1289 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1290 					    int len)
1291 {
1292 	struct hclge_rx_com_wl *rx_packet_cnt;
1293 	struct hclge_desc desc;
1294 	int pos = 0;
1295 	int ret;
1296 
1297 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1298 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1299 	if (ret) {
1300 		dev_err(&hdev->pdev->dev,
1301 			"failed to dump rx global pkt cnt, ret = %d\n", ret);
1302 		return ret;
1303 	}
1304 
1305 	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1306 	pos += scnprintf(buf + pos, len - pos,
1307 			 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1308 			 le16_to_cpu(rx_packet_cnt->com_wl.high),
1309 			 le16_to_cpu(rx_packet_cnt->com_wl.low));
1310 
1311 	return pos;
1312 }
1313 
1314 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1315 					     int len)
1316 {
1317 	struct hclge_rx_priv_wl_buf *rx_priv_wl;
1318 	struct hclge_desc desc[2];
1319 	int pos = 0;
1320 	int i, ret;
1321 
1322 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1323 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1324 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1325 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1326 	if (ret) {
1327 		dev_err(&hdev->pdev->dev,
1328 			"failed to dump rx priv wl buf, ret = %d\n", ret);
1329 		return ret;
1330 	}
1331 
1332 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1333 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1334 		pos += scnprintf(buf + pos, len - pos,
1335 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1336 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1337 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1338 
1339 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1340 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1341 		pos += scnprintf(buf + pos, len - pos,
1342 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1343 			 i + HCLGE_TC_NUM_ONE_DESC,
1344 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1345 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1346 
1347 	return pos;
1348 }
1349 
1350 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1351 						  char *buf, int len)
1352 {
1353 	struct hclge_rx_com_thrd *rx_com_thrd;
1354 	struct hclge_desc desc[2];
1355 	int pos = 0;
1356 	int i, ret;
1357 
1358 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1359 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1360 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1361 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1362 	if (ret) {
1363 		dev_err(&hdev->pdev->dev,
1364 			"failed to dump rx common threshold, ret = %d\n", ret);
1365 		return ret;
1366 	}
1367 
1368 	pos += scnprintf(buf + pos, len - pos, "\n");
1369 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1370 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1371 		pos += scnprintf(buf + pos, len - pos,
1372 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1373 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1374 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1375 
1376 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1377 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1378 		pos += scnprintf(buf + pos, len - pos,
1379 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1380 			 i + HCLGE_TC_NUM_ONE_DESC,
1381 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1382 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1383 
1384 	return pos;
1385 }
1386 
1387 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1388 				      int len)
1389 {
1390 	int pos = 0;
1391 	int ret;
1392 
1393 	ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1394 	if (ret < 0)
1395 		return ret;
1396 	pos += ret;
1397 
1398 	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1399 	if (ret < 0)
1400 		return ret;
1401 	pos += ret;
1402 
1403 	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1404 	if (ret < 0)
1405 		return ret;
1406 	pos += ret;
1407 
1408 	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1409 	if (ret < 0)
1410 		return ret;
1411 	pos += ret;
1412 
1413 	pos += scnprintf(buf + pos, len - pos, "\n");
1414 	if (!hnae3_dev_dcb_supported(hdev))
1415 		return 0;
1416 
1417 	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1418 	if (ret < 0)
1419 		return ret;
1420 	pos += ret;
1421 
1422 	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1423 						     len - pos);
1424 	if (ret < 0)
1425 		return ret;
1426 
1427 	return 0;
1428 }
1429 
1430 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1431 {
1432 	struct hclge_mac_ethertype_idx_rd_cmd *req0;
1433 	struct hclge_desc desc;
1434 	u32 msg_egress_port;
1435 	int pos = 0;
1436 	int ret, i;
1437 
1438 	pos += scnprintf(buf + pos, len - pos,
1439 			 "entry  mac_addr          mask  ether  ");
1440 	pos += scnprintf(buf + pos, len - pos,
1441 			 "mask  vlan  mask  i_map  i_dir  e_type  ");
1442 	pos += scnprintf(buf + pos, len - pos, "pf_id  vf_id  q_id  drop\n");
1443 
1444 	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1445 		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1446 					   true);
1447 		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1448 		req0->index = cpu_to_le16(i);
1449 
1450 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1451 		if (ret) {
1452 			dev_err(&hdev->pdev->dev,
1453 				"failed to dump manage table, ret = %d\n", ret);
1454 			return ret;
1455 		}
1456 
1457 		if (!req0->resp_code)
1458 			continue;
1459 
1460 		pos += scnprintf(buf + pos, len - pos, "%02u     %pM ",
1461 				 le16_to_cpu(req0->index), req0->mac_addr);
1462 
1463 		pos += scnprintf(buf + pos, len - pos,
1464 				 "%x     %04x   %x     %04x  ",
1465 				 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1466 				 le16_to_cpu(req0->ethter_type),
1467 				 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1468 				 le16_to_cpu(req0->vlan_tag) &
1469 				 HCLGE_DBG_MNG_VLAN_TAG);
1470 
1471 		pos += scnprintf(buf + pos, len - pos,
1472 				 "%x     %02x     %02x     ",
1473 				 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1474 				 req0->i_port_bitmap, req0->i_port_direction);
1475 
1476 		msg_egress_port = le16_to_cpu(req0->egress_port);
1477 		pos += scnprintf(buf + pos, len - pos,
1478 				 "%x       %x      %02x     %04x  %x\n",
1479 				 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1480 				 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1481 				 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1482 				 le16_to_cpu(req0->egress_queue),
1483 				 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1484 	}
1485 
1486 	return 0;
1487 }
1488 
1489 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1490 
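/* Read one flow director TCAM entry (x or y key, selected by sel_x) at
 * location tcam_msg.loc and format it into tcam_buf.
 */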
1491 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1492 				  char *tcam_buf,
1493 				  struct hclge_dbg_tcam_msg tcam_msg)
1494 {
1495 	struct hclge_fd_tcam_config_1_cmd *req1;
1496 	struct hclge_fd_tcam_config_2_cmd *req2;
1497 	struct hclge_fd_tcam_config_3_cmd *req3;
1498 	struct hclge_desc desc[3];
1499 	int pos = 0;
1500 	int ret, i;
1501 	u32 *req;
1502 
1503 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1504 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1505 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1506 	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1507 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1508 
1509 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1510 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1511 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1512 
1513 	req1->stage  = tcam_msg.stage;
1514 	req1->xy_sel = sel_x ? 1 : 0;
1515 	req1->index  = cpu_to_le32(tcam_msg.loc);
1516 
1517 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
1518 	if (ret)
1519 		return ret;
1520 
1521 	pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1522 			 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1523 			 tcam_msg.loc);
1524 
1525 	/* tcam_data0 ~ tcam_data1 */
1526 	req = (u32 *)req1->tcam_data;
1527 	for (i = 0; i < 2; i++)
1528 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1529 				 "%08x\n", *req++);
1530 
1531 	/* tcam_data2 ~ tcam_data7 */
1532 	req = (u32 *)req2->tcam_data;
1533 	for (i = 0; i < 6; i++)
1534 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1535 				 "%08x\n", *req++);
1536 
1537 	/* tcam_data8 ~ tcam_data12 */
1538 	req = (u32 *)req3->tcam_data;
1539 	for (i = 0; i < 5; i++)
1540 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1541 				 "%08x\n", *req++);
1542 
1543 	return ret;
1544 }
1545 
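/* Collect the locations of all flow director rules under fd_rule_lock.
 * Returns the rule count, or -EINVAL if no rule exists or the count does not
 * match hdev->hclge_fd_rule_num.
 */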
1546 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1547 {
1548 	struct hclge_fd_rule *rule;
1549 	struct hlist_node *node;
1550 	int cnt = 0;
1551 
1552 	spin_lock_bh(&hdev->fd_rule_lock);
1553 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1554 		rule_locs[cnt] = rule->location;
1555 		cnt++;
1556 	}
1557 	spin_unlock_bh(&hdev->fd_rule_lock);
1558 
1559 	if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1560 		return -EINVAL;
1561 
1562 	return cnt;
1563 }
1564 
1565 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1566 {
1567 	u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1568 	struct hclge_dbg_tcam_msg tcam_msg;
1569 	int i, ret, rule_cnt;
1570 	u16 *rule_locs;
1571 	char *tcam_buf;
1572 	int pos = 0;
1573 
1574 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1575 		dev_err(&hdev->pdev->dev,
1576 			"Only FD-supported dev supports dump fd tcam\n");
1577 		return -EOPNOTSUPP;
1578 	}
1579 
1580 	if (!hdev->hclge_fd_rule_num || !rule_num)
1581 		return 0;
1582 
1583 	rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1584 	if (!rule_locs)
1585 		return -ENOMEM;
1586 
1587 	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1588 	if (!tcam_buf) {
1589 		kfree(rule_locs);
1590 		return -ENOMEM;
1591 	}
1592 
1593 	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1594 	if (rule_cnt < 0) {
1595 		ret = rule_cnt;
1596 		dev_err(&hdev->pdev->dev,
1597 			"failed to get rule number, ret = %d\n", ret);
1598 		goto out;
1599 	}
1600 
1601 	ret = 0;
1602 	for (i = 0; i < rule_cnt; i++) {
1603 		tcam_msg.stage = HCLGE_FD_STAGE_1;
1604 		tcam_msg.loc = rule_locs[i];
1605 
1606 		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1607 		if (ret) {
1608 			dev_err(&hdev->pdev->dev,
1609 				"failed to get fd tcam key x, ret = %d\n", ret);
1610 			goto out;
1611 		}
1612 
1613 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1614 
1615 		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1616 		if (ret) {
1617 			dev_err(&hdev->pdev->dev,
1618 				"failed to get fd tcam key y, ret = %d\n", ret);
1619 			goto out;
1620 		}
1621 
1622 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1623 	}
1624 
1625 out:
1626 	kfree(tcam_buf);
1627 	kfree(rule_locs);
1628 	return ret;
1629 }
1630 
1631 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1632 {
1633 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1634 	struct hclge_fd_ad_cnt_read_cmd *req;
1635 	char str_id[HCLGE_DBG_ID_LEN];
1636 	struct hclge_desc desc;
1637 	int pos = 0;
1638 	int ret;
1639 	u64 cnt;
1640 	u8 i;
1641 
1642 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
1643 		return -EOPNOTSUPP;
1644 
1645 	pos += scnprintf(buf + pos, len - pos,
1646 			 "func_id\thit_times\n");
1647 
1648 	for (i = 0; i < func_num; i++) {
1649 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1650 		req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1651 		req->index = cpu_to_le16(i);
1652 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1653 		if (ret) {
1654 			dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1655 				ret);
1656 			return ret;
1657 		}
1658 		cnt = le64_to_cpu(req->cnt);
1659 		hclge_dbg_get_func_id_str(str_id, i);
1660 		pos += scnprintf(buf + pos, len - pos,
1661 				 "%s\t%llu\n", str_id, cnt);
1662 	}
1663 
1664 	return 0;
1665 }
1666 
1667 static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
1668 	{HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
1669 	{HCLGE_MISC_RESET_STS_REG,   "reset interrupt source"},
1670 	{HCLGE_MISC_VECTOR_INT_STS,  "reset interrupt status"},
1671 	{HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
1672 	{HCLGE_GLOBAL_RESET_REG,  "hardware reset status"},
1673 	{HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
1674 	{HCLGE_FUN_RST_ING, "function reset status"}
1675 };
1676 
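/* Dump the reset statistics together with the reset related registers listed
 * in hclge_dbg_rst_info[].
 */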
1677 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1678 {
1679 	u32 i, offset;
1680 	int pos = 0;
1681 
1682 	pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1683 			 hdev->rst_stats.pf_rst_cnt);
1684 	pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1685 			 hdev->rst_stats.flr_rst_cnt);
1686 	pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1687 			 hdev->rst_stats.global_rst_cnt);
1688 	pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1689 			 hdev->rst_stats.imp_rst_cnt);
1690 	pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1691 			 hdev->rst_stats.reset_done_cnt);
1692 	pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1693 			 hdev->rst_stats.hw_reset_done_cnt);
1694 	pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1695 			 hdev->rst_stats.reset_cnt);
1696 	pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1697 			 hdev->rst_stats.reset_fail_cnt);
1698 
1699 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
1700 		offset = hclge_dbg_rst_info[i].offset;
1701 		pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
1702 				 hclge_dbg_rst_info[i].message,
1703 				 hclge_read_dev(&hdev->hw, offset));
1704 	}
1705 
1706 	pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1707 			 hdev->state);
1708 
1709 	return 0;
1710 }
1711 
1712 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1713 {
1714 	unsigned long rem_nsec;
1715 	int pos = 0;
1716 	u64 lc;
1717 
1718 	lc = local_clock();
1719 	rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1720 
1721 	pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1722 			 (unsigned long)lc, rem_nsec / 1000);
1723 	pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1724 			 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1725 	pos += scnprintf(buf + pos, len - pos,
1726 			 "last_service_task_processed: %lu(jiffies)\n",
1727 			 hdev->last_serv_processed);
1728 	pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1729 			 hdev->serv_processed_cnt);
1730 
1731 	return 0;
1732 }
1733 
1734 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1735 {
1736 	int pos = 0;
1737 
1738 	pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1739 			 hdev->num_nic_msi);
1740 	pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1741 			 hdev->num_roce_msi);
1742 	pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1743 			 hdev->num_msi_used);
1744 	pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1745 			 hdev->num_msi_left);
1746 
1747 	return 0;
1748 }
1749 
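/* Print the IMP statistics data as "offset | data" lines, two u32 words per
 * line.
 */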
1750 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1751 					  char *buf, int len, u32 bd_num)
1752 {
1753 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1754 
1755 	struct hclge_desc *desc_index = desc_src;
1756 	u32 offset = 0;
1757 	int pos = 0;
1758 	u32 i, j;
1759 
1760 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1761 
1762 	for (i = 0; i < bd_num; i++) {
1763 		j = 0;
1764 		while (j < HCLGE_DESC_DATA_LEN - 1) {
1765 			pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1766 					 offset);
1767 			pos += scnprintf(buf + pos, len - pos, "0x%08x  ",
1768 					 le32_to_cpu(desc_index->data[j++]));
1769 			pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1770 					 le32_to_cpu(desc_index->data[j++]));
1771 			offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1772 		}
1773 		desc_index++;
1774 	}
1775 }
1776 
static int
hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_get_imp_bd_cmd *req;
	struct hclge_desc *desc_src;
	struct hclge_desc desc;
	u32 bd_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);

	req = (struct hclge_get_imp_bd_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get imp statistics bd number, ret = %d\n",
			ret);
		return ret;
	}

	bd_num = le32_to_cpu(req->bd_num);
	if (!bd_num) {
		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
		return -EINVAL;
	}

	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
				 HCLGE_OPC_IMP_STATS_INFO);
	if (ret) {
		kfree(desc_src);
		dev_err(&hdev->pdev->dev,
			"failed to get imp statistics, ret = %d\n", ret);
		return ret;
	}

	hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);

	kfree(desc_src);

	return 0;
}

#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
#define HCLGE_MAX_NCL_CONFIG_LENGTH	16384

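/* hclge_ncl_config_data_print: print one batch of NCL configuration data in
 * "offset | data" format, skipping the first data word of the first
 * descriptor and stopping once @index bytes have been consumed
 * @desc: descriptors holding the queried NCL configuration data
 * @index: number of bytes left to dump, updated on return
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 * @pos: current write position in @buf, updated on return
 */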
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
					char *buf, int len, int *pos)
{
#define HCLGE_CMD_DATA_NUM		6

	int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
	int i, j;

	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
			if (i == 0 && j == 0)
				continue;

			*pos += scnprintf(buf + *pos, len - *pos,
					  "0x%04x | 0x%08x\n", offset,
					  le32_to_cpu(desc[i].data[j]));

			offset += sizeof(u32);
			*index -= sizeof(u32);

			if (*index <= 0)
				return;
		}
	}
}

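/* hclge_dbg_dump_ncl_config: dump the NCL configuration, querying it from
 * firmware in chunks of HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes until
 * HCLGE_MAX_NCL_CONFIG_LENGTH bytes have been printed
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 */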
static int
hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
{
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)

	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
	int pos = 0;
	u32 data0;
	int ret;

	pos += scnprintf(buf + pos, len - pos, "offset | data\n");

	while (index > 0) {
		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
		else
			data0 |= (u32)index << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);
		if (ret)
			return ret;

		hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
	}

	return 0;
}

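/* hclge_dbg_dump_loopback: dump the current loopback state of the MAC,
 * including app, serdes serial, serdes parallel and phy loopback
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 */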
static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	struct hclge_config_mac_mode_cmd *req_app;
	struct hclge_common_lb_cmd *req_common;
	struct hclge_desc desc;
	u8 loopback_en;
	int pos = 0;
	int ret;

	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
	req_common = (struct hclge_common_lb_cmd *)desc.data;

	pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
			 hdev->hw.mac.mac_id);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump app loopback status, ret = %d\n", ret);
		return ret;
	}

	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
				    HCLGE_MAC_APP_LP_B);
	pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
			 state_str[loopback_en]);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump common loopback status, ret = %d\n",
			ret);
		return ret;
	}

	loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
			 state_str[loopback_en]);

	loopback_en = req_common->enable &
			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
	pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
			 state_str[loopback_en]);

	if (phydev) {
		loopback_en = phydev->loopback_enabled;
		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
				 state_str[loopback_en]);
	} else if (hnae3_dev_phy_imp_supported(hdev)) {
		loopback_en = req_common->enable &
			      HCLGE_CMD_GE_PHY_INNER_LOOP_B ? 1 : 0;
		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
				 state_str[loopback_en]);
	}

	return 0;
}

/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 */
static int
hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;
	int pos = 0;

	pos += scnprintf(buf + pos, len - pos,
			 "Recently generated mac tnl interruption:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);

		pos += scnprintf(buf + pos, len - pos,
				 "[%07lu.%03lu] status = 0x%x\n",
				 (unsigned long)stats.time, rem_nsec / 1000,
				 stats.status);
	}

	return 0;
}

static const struct hclge_dbg_item mac_list_items[] = {
	{ "FUNC_ID", 2 },
	{ "MAC_ADDR", 12 },
	{ "STATE", 2 },
};

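/* hclge_dbg_dump_mac_list: dump the unicast or multicast MAC address list
 * of every vport as a table of function id, MAC address and entry state
 * (TO_ADD/TO_DEL/ACTIVE)
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 * @is_unicast: true to dump the unicast list, false for the multicast list
 */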
static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
				    bool is_unicast)
{
	char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
	char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
	char *result[ARRAY_SIZE(mac_list_items)];
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_vport *vport;
	struct list_head *list;
	u32 func_id;
	int pos = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
		result[i] = &data_str[i][0];

	pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
			 is_unicast ? "UC" : "MC");
	hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
			       NULL, ARRAY_SIZE(mac_list_items));
	pos += scnprintf(buf + pos, len - pos, "%s", content);

	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
		vport = &hdev->vport[func_id];
		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
		spin_lock_bh(&vport->mac_list_lock);
		list_for_each_entry_safe(mac_node, tmp, list, node) {
			i = 0;
			result[i++] = hclge_dbg_get_func_id_str(str_id,
								func_id);
			sprintf(result[i++], "%pM", mac_node->mac_addr);
			sprintf(result[i++], "%5s",
				hclge_mac_state_str[mac_node->state]);
			hclge_dbg_fill_content(content, sizeof(content),
					       mac_list_items,
					       (const char **)result,
					       ARRAY_SIZE(mac_list_items));
			pos += scnprintf(buf + pos, len - pos, "%s", content);
		}
		spin_unlock_bh(&vport->mac_list_lock);
	}
}

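/* hclge_dbg_dump_umv_info: dump the unicast MAC vlan table resources, i.e.
 * the private and shared UMV space sizes, the number of entries used by
 * each function and the number of multicast MAC entries in use
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 */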
static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
{
	u8 func_num = pci_num_vf(hdev->pdev) + 1;
	struct hclge_vport *vport;
	int pos = 0;
	u8 i;

	pos += scnprintf(buf + pos, len - pos, "num_alloc_vport   : %u\n",
			 hdev->num_alloc_vport);
	pos += scnprintf(buf + pos, len - pos, "max_umv_size     : %u\n",
			 hdev->max_umv_size);
	pos += scnprintf(buf + pos, len - pos, "wanted_umv_size  : %u\n",
			 hdev->wanted_umv_size);
	pos += scnprintf(buf + pos, len - pos, "priv_umv_size    : %u\n",
			 hdev->priv_umv_size);

	mutex_lock(&hdev->vport_lock);
	pos += scnprintf(buf + pos, len - pos, "share_umv_size   : %u\n",
			 hdev->share_umv_size);
	for (i = 0; i < func_num; i++) {
		vport = &hdev->vport[i];
		pos += scnprintf(buf + pos, len - pos,
				 "vport(%u) used_umv_num : %u\n",
				 i, vport->used_umv_num);
	}
	mutex_unlock(&hdev->vport_lock);

	pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num  : %u\n",
			 hdev->used_mc_mac_num);

	return 0;
}

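/* hclge_get_vlan_rx_offload_cfg: query the rx vlan offload configuration of
 * a function from firmware and decode the strip, drop and priority-only
 * bits for tag1 and tag2 into @vlan_cfg
 * @hdev: pointer to struct hclge_dev
 * @vf_id: index of the function to query
 * @vlan_cfg: structure the decoded configuration is written to
 */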
static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
					 struct hclge_dbg_vlan_cfg *vlan_cfg)
{
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_desc desc;
	u16 bmap_index;
	u8 rx_cfg;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u rxvlan cfg, ret = %d\n",
			vf_id, ret);
		return ret;
	}

	rx_cfg = req->vport_vlan_cfg;
	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);

	return 0;
}

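/* hclge_get_vlan_tx_offload_cfg: query the tx vlan offload configuration of
 * a function from firmware and decode the default pvid and the accept,
 * insert and shift bits for tag1 and tag2 into @vlan_cfg
 * @hdev: pointer to struct hclge_dev
 * @vf_id: index of the function to query
 * @vlan_cfg: structure the decoded configuration is written to
 */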
static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
					 struct hclge_dbg_vlan_cfg *vlan_cfg)
{
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_desc desc;
	u16 bmap_index;
	u8 tx_cfg;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u txvlan cfg, ret = %d\n",
			vf_id, ret);
		return ret;
	}

	tx_cfg = req->vport_vlan_cfg;
	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);

	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);

	return 0;
}

static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
					    u8 vlan_type, u8 vf_id,
					    struct hclge_desc *desc)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	int ret;

	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u vlan filter config, ret = %d.\n",
			vf_id, ret);

	return ret;
}

static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
				       u8 vf_id, u8 *vlan_fe)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
	if (ret)
		return ret;

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	*vlan_fe = req->vlan_fe;

	return 0;
}

static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
						   u8 vf_id, u8 *bypass_en)
{
	struct hclge_port_vlan_filter_bypass_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
			vf_id, ret);
		return ret;
	}

	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);

	return 0;
}

static const struct hclge_dbg_item vlan_filter_items[] = {
	{ "FUNC_ID", 2 },
	{ "I_VF_VLAN_FILTER", 2 },
	{ "E_VF_VLAN_FILTER", 2 },
	{ "PORT_VLAN_FILTER_BYPASS", 0 }
};

static const struct hclge_dbg_item vlan_offload_items[] = {
	{ "FUNC_ID", 2 },
	{ "PVID", 4 },
	{ "ACCEPT_TAG1", 2 },
	{ "ACCEPT_TAG2", 2 },
	{ "ACCEPT_UNTAG1", 2 },
	{ "ACCEPT_UNTAG2", 2 },
	{ "INSERT_TAG1", 2 },
	{ "INSERT_TAG2", 2 },
	{ "SHIFT_TAG", 2 },
	{ "STRIP_TAG1", 2 },
	{ "STRIP_TAG2", 2 },
	{ "DROP_TAG1", 2 },
	{ "DROP_TAG2", 2 },
	{ "PRI_ONLY_TAG1", 2 },
	{ "PRI_ONLY_TAG2", 0 }
};

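/* hclge_dbg_dump_vlan_filter_config: dump the port vlan filter state and a
 * per function table of ingress/egress vlan filter and bypass state built
 * from vlan_filter_items
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 * @pos: current write position in @buf, updated on return
 */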
static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
					     int len, int *pos)
{
	char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
	const char *result[ARRAY_SIZE(vlan_filter_items)];
	u8 i, j, vlan_fe, bypass, ingress, egress;
	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
	int ret;

	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
					  &vlan_fe);
	if (ret)
		return ret;
	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;

	*pos += scnprintf(buf + *pos, len - *pos, "I_PORT_VLAN_FILTER: %s\n",
			  state_str[ingress]);
	*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
			  state_str[egress]);

	hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
			       NULL, ARRAY_SIZE(vlan_filter_items));
	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);

	for (i = 0; i < func_num; i++) {
		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
						  &vlan_fe);
		if (ret)
			return ret;

		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
		if (ret)
			return ret;
		j = 0;
		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
		result[j++] = state_str[ingress];
		result[j++] = state_str[egress];
		result[j++] =
			test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
				 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
		hclge_dbg_fill_content(content, sizeof(content),
				       vlan_filter_items, result,
				       ARRAY_SIZE(vlan_filter_items));
		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
	}
	*pos += scnprintf(buf + *pos, len - *pos, "\n");

	return 0;
}

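/* hclge_dbg_dump_vlan_offload_config: dump the tx and rx vlan offload
 * configuration of every function as a table built from vlan_offload_items
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 * @pos: current write position in @buf, updated on return
 */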
static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
					      int len, int *pos)
{
	char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
	const char *result[ARRAY_SIZE(vlan_offload_items)];
	char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
	struct hclge_dbg_vlan_cfg vlan_cfg;
	int ret;
	u8 i, j;

	hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
			       NULL, ARRAY_SIZE(vlan_offload_items));
	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);

	for (i = 0; i < func_num; i++) {
		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
		if (ret)
			return ret;

		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
		if (ret)
			return ret;

		sprintf(str_pvid, "%u", vlan_cfg.pvid);
		j = 0;
		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
		result[j++] = str_pvid;
		result[j++] = state_str[vlan_cfg.accept_tag1];
		result[j++] = state_str[vlan_cfg.accept_tag2];
		result[j++] = state_str[vlan_cfg.accept_untag1];
		result[j++] = state_str[vlan_cfg.accept_untag2];
		result[j++] = state_str[vlan_cfg.insert_tag1];
		result[j++] = state_str[vlan_cfg.insert_tag2];
		result[j++] = state_str[vlan_cfg.shift_tag];
		result[j++] = state_str[vlan_cfg.strip_tag1];
		result[j++] = state_str[vlan_cfg.strip_tag2];
		result[j++] = state_str[vlan_cfg.drop_tag1];
		result[j++] = state_str[vlan_cfg.drop_tag2];
		result[j++] = state_str[vlan_cfg.pri_only1];
		result[j++] = state_str[vlan_cfg.pri_only2];

		hclge_dbg_fill_content(content, sizeof(content),
				       vlan_offload_items, result,
				       ARRAY_SIZE(vlan_offload_items));
		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
	}

	return 0;
}

static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
				      int len)
{
	int pos = 0;
	int ret;

	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
	if (ret)
		return ret;

	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
}

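/* hclge_dbg_dump_ptp_info: dump the PTP state of the device, including the
 * enable flags, the last rx/tx timestamps and counters, and the software
 * and hardware PTP configuration
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 */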
static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_ptp *ptp = hdev->ptp;
	u32 sw_cfg = ptp->ptp_cfg;
	unsigned int tx_start;
	unsigned int last_rx;
	int pos = 0;
	u32 hw_cfg;
	int ret;

	pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
			 ptp->info.name);
	pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
			 "yes" : "no");
	pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
			 "yes" : "no");
	pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
			 "yes" : "no");

	last_rx = jiffies_to_msecs(ptp->last_rx);
	pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
			 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
	pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);

	tx_start = jiffies_to_msecs(ptp->tx_start);
	pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
			 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
	pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
	pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
			 ptp->tx_skipped);
	pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
			 ptp->tx_timeout);
	pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
			 ptp->last_tx_seqid);

	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
	if (ret)
		return ret;

	pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
			 sw_cfg, hw_cfg);

	pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
			 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);

	return 0;
}

static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
{
	hclge_dbg_dump_mac_list(hdev, buf, len, true);

	return 0;
}

static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
{
	hclge_dbg_dump_mac_list(hdev, buf, len, false);

	return 0;
}

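/* hclge_dbg_cmd_func maps each debugfs dump command to its handler: either
 * a plain dump callback or a register dump callback which also receives the
 * command as a parameter
 */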
static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
	{
		.cmd = HNAE3_DBG_CMD_TM_NODES,
		.dbg_dump = hclge_dbg_dump_tm_nodes,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PRI,
		.dbg_dump = hclge_dbg_dump_tm_pri,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_QSET,
		.dbg_dump = hclge_dbg_dump_tm_qset,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_MAP,
		.dbg_dump = hclge_dbg_dump_tm_map,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PG,
		.dbg_dump = hclge_dbg_dump_tm_pg,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PORT,
		.dbg_dump = hclge_dbg_dump_tm_port,
	},
	{
		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
		.dbg_dump = hclge_dbg_dump_tc,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
		.dbg_dump = hclge_dbg_dump_qos_pause_cfg,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
		.dbg_dump = hclge_dbg_dump_qos_pri_map,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
		.dbg_dump = hclge_dbg_dump_qos_dscp_map,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
		.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_UC,
		.dbg_dump = hclge_dbg_dump_mac_uc,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_MC,
		.dbg_dump = hclge_dbg_dump_mac_mc,
	},
	{
		.cmd = HNAE3_DBG_CMD_MNG_TBL,
		.dbg_dump = hclge_dbg_dump_mng_table,
	},
	{
		.cmd = HNAE3_DBG_CMD_LOOPBACK,
		.dbg_dump = hclge_dbg_dump_loopback,
	},
	{
		.cmd = HNAE3_DBG_CMD_PTP_INFO,
		.dbg_dump = hclge_dbg_dump_ptp_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
		.dbg_dump = hclge_dbg_dump_interrupt,
	},
	{
		.cmd = HNAE3_DBG_CMD_RESET_INFO,
		.dbg_dump = hclge_dbg_dump_rst_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_IMP_INFO,
		.dbg_dump = hclge_dbg_get_imp_stats_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
		.dbg_dump = hclge_dbg_dump_ncl_config,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_SSU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RPU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_NCSI,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RTC,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_PPP,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RCB,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_TQP,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_MAC,
		.dbg_dump = hclge_dbg_dump_mac,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_DCB,
		.dbg_dump = hclge_dbg_dump_dcb,
	},
	{
		.cmd = HNAE3_DBG_CMD_FD_TCAM,
		.dbg_dump = hclge_dbg_dump_fd_tcam,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
		.dbg_dump = hclge_dbg_dump_mac_tnl_status,
	},
	{
		.cmd = HNAE3_DBG_CMD_SERV_INFO,
		.dbg_dump = hclge_dbg_dump_serv_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
		.dbg_dump = hclge_dbg_dump_vlan_config,
	},
	{
		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
		.dbg_dump = hclge_dbg_dump_fd_counter,
	},
	{
		.cmd = HNAE3_DBG_CMD_UMV_INFO,
		.dbg_dump = hclge_dbg_dump_umv_info,
	},
};

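/* hclge_dbg_read_cmd: entry point for the debugfs dump commands, looks up
 * @cmd in hclge_dbg_cmd_func and calls the matching dump handler
 * @handle: pointer to the hnae3 handle of the vport
 * @cmd: debugfs command to be executed
 * @buf: buffer to write the dumped information into
 * @len: length of the buffer
 */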
int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
		       char *buf, int len)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	const struct hclge_dbg_func *cmd_func;
	struct hclge_dev *hdev = vport->back;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
		if (cmd == hclge_dbg_cmd_func[i].cmd) {
			cmd_func = &hclge_dbg_cmd_func[i];
			if (cmd_func->dbg_dump)
				return cmd_func->dbg_dump(hdev, buf, len);
			else
				return cmd_func->dbg_dump_reg(hdev, cmd, buf,
							      len);
		}
	}

	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
	return -EINVAL;
}