1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3 
4 #include <linux/device.h>
5 #include <linux/sched/clock.h>
6 
7 #include "hclge_debugfs.h"
8 #include "hclge_err.h"
9 #include "hclge_main.h"
10 #include "hclge_tm.h"
11 #include "hnae3.h"
12 
13 static const char * const state_str[] = { "off", "on" };
14 static const char * const hclge_mac_state_str[] = {
15 	"TO_ADD", "TO_DEL", "ACTIVE"
16 };
17 
18 static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
19 
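/* Table mapping each debugfs register-dump command to its DFX register
 * group: the message table describing the registers, the offset used to
 * look up the required BD count, and the opcode used to read the group.
 */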
20 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
21 	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
22 	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
23 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
24 		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
25 		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
26 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
27 	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
28 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
29 		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
30 		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
31 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
32 	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
33 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
34 		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
35 		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
36 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
37 	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
38 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
39 		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
40 		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
41 	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
42 	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
43 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
44 		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
45 		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
46 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
47 	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
48 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
49 		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
50 		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
51 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
52 	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
53 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
54 		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
55 		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
56 	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
57 	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
58 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
59 		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
60 		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
61 	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
62 	  .dfx_msg = &hclge_dbg_rtc_reg[0],
63 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
64 		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
65 		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
66 	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
67 	  .dfx_msg = &hclge_dbg_ppp_reg[0],
68 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
69 		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
70 		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
71 	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
72 	  .dfx_msg = &hclge_dbg_rcb_reg[0],
73 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
74 		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
75 		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
76 	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
77 	  .dfx_msg = &hclge_dbg_tqp_reg[0],
78 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
79 		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
80 		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
81 };
82 
83 /* Make sure: len(name) + interval >= maxlen(item data) + 2.
84  * For example, for name = "pkt_num" (len: 7), the item data is a u32
85  * printed with "%u" (max len: 10), so the interval must be at least 5.
86  */
87 static void hclge_dbg_fill_content(char *content, u16 len,
88 				   const struct hclge_dbg_item *items,
89 				   const char **result, u16 size)
90 {
91 	char *pos = content;
92 	u16 i;
93 
94 	memset(content, ' ', len);
95 	for (i = 0; i < size; i++) {
96 		if (result)
97 			memcpy(pos, result[i], strlen(result[i]));
98 		else
99 			memcpy(pos, items[i].name, strlen(items[i].name));
100 		pos += strlen(items[i].name) + items[i].interval;
101 	}
102 	*pos++ = '\n';
103 	*pos++ = '\0';
104 }
105 
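/* Format a function id for display: id 0 is the PF, any other id N
 * refers to VF N - 1.
 */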
106 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
107 {
108 	if (id)
109 		sprintf(buf, "vf%u", id - 1U);
110 	else
111 		sprintf(buf, "pf");
112 
113 	return buf;
114 }
115 
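/* Look up how many command BDs are needed to read a DFX register group:
 * query the BD-number table and take the entry at 'offset'. A count of
 * zero is rejected.
 */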
116 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
117 				    u32 *bd_num)
118 {
119 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
120 	int entries_per_desc;
121 	int index;
122 	int ret;
123 
124 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
125 	if (ret) {
126 		dev_err(&hdev->pdev->dev,
127 			"failed to get dfx bd_num, offset = %d, ret = %d\n",
128 			offset, ret);
129 		return ret;
130 	}
131 
132 	entries_per_desc = ARRAY_SIZE(desc[0].data);
133 	index = offset % entries_per_desc;
134 
135 	*bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
136 	if (!(*bd_num)) {
137 		dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
138 		return -EINVAL;
139 	}
140 
141 	return 0;
142 }
143 
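/* Issue a read made up of 'bd_num' chained descriptors: 'index' is placed
 * in the first descriptor's data[0] and the NEXT flag is set on every
 * descriptor except the last one.
 */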
144 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
145 			      struct hclge_desc *desc_src,
146 			      int index, int bd_num,
147 			      enum hclge_opcode_type cmd)
148 {
149 	struct hclge_desc *desc = desc_src;
150 	int ret, i;
151 
152 	hclge_cmd_setup_basic_desc(desc, cmd, true);
153 	desc->data[0] = cpu_to_le32(index);
154 
155 	for (i = 1; i < bd_num; i++) {
156 		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
157 		desc++;
158 		hclge_cmd_setup_basic_desc(desc, cmd, true);
159 	}
160 
161 	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
162 	if (ret)
163 		dev_err(&hdev->pdev->dev,
164 			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
165 	return ret;
166 }
167 
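/* Dump a per-TQP DFX register group: print a legend of item names followed
 * by one row of register values for each TQP of vport 0.
 */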
168 static int
169 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
170 		       const struct hclge_dbg_reg_type_info *reg_info,
171 		       char *buf, int len, int *pos)
172 {
173 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
174 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
175 	struct hclge_desc *desc_src;
176 	u32 index, entry, i, cnt, bd_num;
177 	int min_num, ret;
178 	struct hclge_desc *desc;
179 
180 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
181 	if (ret)
182 		return ret;
183 
184 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
185 	if (!desc_src)
186 		return -ENOMEM;
187 
188 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
189 
190 	for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
191 		*pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
192 				  cnt++, dfx_message->message);
193 
194 	for (i = 0; i < cnt; i++)
195 		*pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
196 
197 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
198 
199 	for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
200 		dfx_message = reg_info->dfx_msg;
201 		desc = desc_src;
202 		ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
203 					 reg_msg->cmd);
204 		if (ret)
205 			break;
206 
207 		for (i = 0; i < min_num; i++, dfx_message++) {
208 			entry = i % HCLGE_DESC_DATA_LEN;
209 			if (i > 0 && !entry)
210 				desc++;
211 
212 			*pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
213 					  le32_to_cpu(desc->data[entry]));
214 		}
215 		*pos += scnprintf(buf + *pos, len - *pos, "\n");
216 	}
217 
218 	kfree(desc_src);
219 	return ret;
220 }
221 
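/* Dump a common DFX register group: read a single snapshot and print
 * "name: value" for every entry whose message descriptor has its flag set.
 */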
222 static int
223 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
224 			  const struct hclge_dbg_reg_type_info *reg_info,
225 			  char *buf, int len, int *pos)
226 {
227 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
228 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
229 	struct hclge_desc *desc_src;
230 	int min_num, ret;
231 	struct hclge_desc *desc;
232 	u32 entry, i, bd_num;
233 
234 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
235 	if (ret)
236 		return ret;
237 
238 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
239 	if (!desc_src)
240 		return -ENOMEM;
241 
242 	desc = desc_src;
243 
244 	ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
245 	if (ret) {
246 		kfree(desc_src);
247 		return ret;
248 	}
249 
250 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
251 
252 	for (i = 0; i < min_num; i++, dfx_message++) {
253 		entry = i % HCLGE_DESC_DATA_LEN;
254 		if (i > 0 && !entry)
255 			desc++;
256 		if (!dfx_message->flag)
257 			continue;
258 
259 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
260 				  dfx_message->message,
261 				  le32_to_cpu(desc->data[entry]));
262 	}
263 
264 	kfree(desc_src);
265 	return 0;
266 }
267 
268 static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
269 	{HCLGE_MAC_TX_EN_B,  "mac_trans_en"},
270 	{HCLGE_MAC_RX_EN_B,  "mac_rcv_en"},
271 	{HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
272 	{HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
273 	{HCLGE_MAC_1588_TX_B, "1588_trans_en"},
274 	{HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
275 	{HCLGE_MAC_APP_LP_B,  "mac_app_loop_en"},
276 	{HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
277 	{HCLGE_MAC_FCS_TX_B,  "mac_fcs_tx_en"},
278 	{HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
279 	{HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
280 	{HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
281 	{HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
282 	{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
283 };
284 
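/* Read HCLGE_OPC_CONFIG_MAC_MODE and report the state of each enable bit
 * listed in hclge_dbg_mac_en_status.
 */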
285 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
286 					     int len, int *pos)
287 {
288 	struct hclge_config_mac_mode_cmd *req;
289 	struct hclge_desc desc;
290 	u32 loop_en, i, offset;
291 	int ret;
292 
293 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
294 
295 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
296 	if (ret) {
297 		dev_err(&hdev->pdev->dev,
298 			"failed to dump mac enable status, ret = %d\n", ret);
299 		return ret;
300 	}
301 
302 	req = (struct hclge_config_mac_mode_cmd *)desc.data;
303 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
304 
305 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
306 		offset = hclge_dbg_mac_en_status[i].offset;
307 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
308 				  hclge_dbg_mac_en_status[i].message,
309 				  hnae3_get_bit(loop_en, offset));
310 	}
311 
312 	return 0;
313 }
314 
315 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
316 					 int len, int *pos)
317 {
318 	struct hclge_config_max_frm_size_cmd *req;
319 	struct hclge_desc desc;
320 	int ret;
321 
322 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
323 
324 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
325 	if (ret) {
326 		dev_err(&hdev->pdev->dev,
327 			"failed to dump mac frame size, ret = %d\n", ret);
328 		return ret;
329 	}
330 
331 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
332 
333 	*pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
334 			  le16_to_cpu(req->max_frm_size));
335 	*pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
336 			  req->min_frm_size);
337 
338 	return 0;
339 }
340 
341 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
342 					   int len, int *pos)
343 {
344 #define HCLGE_MAC_SPEED_SHIFT	0
345 #define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
346 #define HCLGE_MAC_DUPLEX_SHIFT	7
347 
348 	struct hclge_config_mac_speed_dup_cmd *req;
349 	struct hclge_desc desc;
350 	int ret;
351 
352 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
353 
354 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
355 	if (ret) {
356 		dev_err(&hdev->pdev->dev,
357 			"failed to dump mac speed duplex, ret = %d\n", ret);
358 		return ret;
359 	}
360 
361 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
362 
363 	*pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
364 			  hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
365 					  HCLGE_MAC_SPEED_SHIFT));
366 	*pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
367 			  hnae3_get_bit(req->speed_dup,
368 					HCLGE_MAC_DUPLEX_SHIFT));
369 	return 0;
370 }
371 
372 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
373 {
374 	int pos = 0;
375 	int ret;
376 
377 	ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
378 	if (ret)
379 		return ret;
380 
381 	ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
382 	if (ret)
383 		return ret;
384 
385 	return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
386 }
387 
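/* Dump per-qset DCB status: the low byte of desc.data[1] is decoded via
 * the hclge_dbg_bitmap_cmd bit fields (the pri/pg/port helpers below use
 * the same layout).
 */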
388 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
389 				   int *pos)
390 {
391 	struct hclge_dbg_bitmap_cmd req;
392 	struct hclge_desc desc;
393 	u16 qset_id, qset_num;
394 	int ret;
395 
396 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
397 	if (ret)
398 		return ret;
399 
400 	*pos += scnprintf(buf + *pos, len - *pos,
401 			  "qset_id  roce_qset_mask  nic_qset_mask  qset_shaping_pass  qset_bp_status\n");
402 	for (qset_id = 0; qset_id < qset_num; qset_id++) {
403 		ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
404 					 HCLGE_OPC_QSET_DFX_STS);
405 		if (ret)
406 			return ret;
407 
408 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
409 
410 		*pos += scnprintf(buf + *pos, len - *pos,
411 				  "%04u           %#x            %#x             %#x               %#x\n",
412 				  qset_id, req.bit0, req.bit1, req.bit2,
413 				  req.bit3);
414 	}
415 
416 	return 0;
417 }
418 
419 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
420 				  int *pos)
421 {
422 	struct hclge_dbg_bitmap_cmd req;
423 	struct hclge_desc desc;
424 	u8 pri_id, pri_num;
425 	int ret;
426 
427 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
428 	if (ret)
429 		return ret;
430 
431 	*pos += scnprintf(buf + *pos, len - *pos,
432 			  "pri_id  pri_mask  pri_cshaping_pass  pri_pshaping_pass\n");
433 	for (pri_id = 0; pri_id < pri_num; pri_id++) {
434 		ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
435 					 HCLGE_OPC_PRI_DFX_STS);
436 		if (ret)
437 			return ret;
438 
439 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
440 
441 		*pos += scnprintf(buf + *pos, len - *pos,
442 				  "%03u       %#x           %#x                %#x\n",
443 				  pri_id, req.bit0, req.bit1, req.bit2);
444 	}
445 
446 	return 0;
447 }
448 
449 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
450 				 int *pos)
451 {
452 	struct hclge_dbg_bitmap_cmd req;
453 	struct hclge_desc desc;
454 	u8 pg_id;
455 	int ret;
456 
457 	*pos += scnprintf(buf + *pos, len - *pos,
458 			  "pg_id  pg_mask  pg_cshaping_pass  pg_pshaping_pass\n");
459 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
460 		ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
461 					 HCLGE_OPC_PG_DFX_STS);
462 		if (ret)
463 			return ret;
464 
465 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
466 
467 		*pos += scnprintf(buf + *pos, len - *pos,
468 				  "%03u      %#x           %#x               %#x\n",
469 				  pg_id, req.bit0, req.bit1, req.bit2);
470 	}
471 
472 	return 0;
473 }
474 
475 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
476 				    int *pos)
477 {
478 	struct hclge_desc desc;
479 	u16 nq_id;
480 	int ret;
481 
482 	*pos += scnprintf(buf + *pos, len - *pos,
483 			  "nq_id  sch_nic_queue_cnt  sch_roce_queue_cnt\n");
484 	for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
485 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
486 					 HCLGE_OPC_SCH_NQ_CNT);
487 		if (ret)
488 			return ret;
489 
490 		*pos += scnprintf(buf + *pos, len - *pos, "%04u           %#x",
491 				  nq_id, le32_to_cpu(desc.data[1]));
492 
493 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
494 					 HCLGE_OPC_SCH_RQ_CNT);
495 		if (ret)
496 			return ret;
497 
498 		*pos += scnprintf(buf + *pos, len - *pos,
499 				  "               %#x\n",
500 				  le32_to_cpu(desc.data[1]));
501 	}
502 
503 	return 0;
504 }
505 
506 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
507 				   int *pos)
508 {
509 	struct hclge_dbg_bitmap_cmd req;
510 	struct hclge_desc desc;
511 	u8 port_id = 0;
512 	int ret;
513 
514 	ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
515 				 HCLGE_OPC_PORT_DFX_STS);
516 	if (ret)
517 		return ret;
518 
519 	req.bitmap = (u8)le32_to_cpu(desc.data[1]);
520 
521 	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
522 			 req.bit0);
523 	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
524 			 req.bit1);
525 
526 	return 0;
527 }
528 
529 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
530 				 int *pos)
531 {
532 	struct hclge_desc desc[2];
533 	u8 port_id = 0;
534 	int ret;
535 
536 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
537 				 HCLGE_OPC_TM_INTERNAL_CNT);
538 	if (ret)
539 		return ret;
540 
541 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
542 			  le32_to_cpu(desc[0].data[1]));
543 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
544 			  le32_to_cpu(desc[0].data[2]));
545 
546 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
547 				 HCLGE_OPC_TM_INTERNAL_STS);
548 	if (ret)
549 		return ret;
550 
551 	*pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
552 			  le32_to_cpu(desc[0].data[1]));
553 	*pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
554 			  le32_to_cpu(desc[0].data[2]));
555 	*pos += scnprintf(buf + *pos, len - *pos,
556 			  "sch_roce_fifo_afull_gap: %#x\n",
557 			  le32_to_cpu(desc[0].data[3]));
558 	*pos += scnprintf(buf + *pos, len - *pos,
559 			  "tx_private_waterline: %#x\n",
560 			  le32_to_cpu(desc[0].data[4]));
561 	*pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
562 			  le32_to_cpu(desc[0].data[5]));
563 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
564 			  le32_to_cpu(desc[1].data[0]));
565 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
566 			  le32_to_cpu(desc[1].data[1]));
567 
568 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
569 		return 0;
570 
571 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
572 				 HCLGE_OPC_TM_INTERNAL_STS_1);
573 	if (ret)
574 		return ret;
575 
576 	*pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
577 			  le32_to_cpu(desc[0].data[1]));
578 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
579 			  le32_to_cpu(desc[0].data[2]));
580 	*pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
581 			  le32_to_cpu(desc[0].data[3]));
582 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
583 			  le32_to_cpu(desc[0].data[4]));
584 	*pos += scnprintf(buf + *pos, len - *pos,
585 			  "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
586 			  le32_to_cpu(desc[0].data[5]));
587 
588 	return 0;
589 }
590 
591 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
592 {
593 	int pos = 0;
594 	int ret;
595 
596 	ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
597 	if (ret)
598 		return ret;
599 
600 	ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
601 	if (ret)
602 		return ret;
603 
604 	ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
605 	if (ret)
606 		return ret;
607 
608 	ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
609 	if (ret)
610 		return ret;
611 
612 	ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
613 	if (ret)
614 		return ret;
615 
616 	return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
617 }
618 
619 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
620 				  enum hnae3_dbg_cmd cmd, char *buf, int len)
621 {
622 	const struct hclge_dbg_reg_type_info *reg_info;
623 	int pos = 0, ret = 0;
624 	int i;
625 
626 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
627 		reg_info = &hclge_dbg_reg_info[i];
628 		if (cmd == reg_info->cmd) {
629 			if (cmd == HNAE3_DBG_CMD_REG_TQP)
630 				return hclge_dbg_dump_reg_tqp(hdev, reg_info,
631 							      buf, len, &pos);
632 
633 			ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
634 							len, &pos);
635 			if (ret)
636 				break;
637 		}
638 	}
639 
640 	return ret;
641 }
642 
643 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
644 {
645 	struct hclge_ets_tc_weight_cmd *ets_weight;
646 	struct hclge_desc desc;
647 	char *sch_mode_str;
648 	int pos = 0;
649 	int ret;
650 	u8 i;
651 
652 	if (!hnae3_dev_dcb_supported(hdev)) {
653 		dev_err(&hdev->pdev->dev,
654 			"Only DCB-supported dev supports tc\n");
655 		return -EOPNOTSUPP;
656 	}
657 
658 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
659 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
660 	if (ret) {
661 		dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
662 			ret);
663 		return ret;
664 	}
665 
666 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
667 
668 	pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
669 			 hdev->tm_info.num_tc);
670 	pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
671 			 ets_weight->weight_offset);
672 
673 	pos += scnprintf(buf + pos, len - pos, "TC    MODE  WEIGHT\n");
674 	for (i = 0; i < HNAE3_MAX_TC; i++) {
675 		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
676 		pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
677 				 i, sch_mode_str,
678 				 hdev->tm_info.pg_info[0].tc_dwrr[i]);
679 	}
680 
681 	return 0;
682 }
683 
684 static const struct hclge_dbg_item tm_pg_items[] = {
685 	{ "ID", 2 },
686 	{ "PRI_MAP", 2 },
687 	{ "MODE", 2 },
688 	{ "DWRR", 2 },
689 	{ "C_IR_B", 2 },
690 	{ "C_IR_U", 2 },
691 	{ "C_IR_S", 2 },
692 	{ "C_BS_B", 2 },
693 	{ "C_BS_S", 2 },
694 	{ "C_FLAG", 2 },
695 	{ "C_RATE(Mbps)", 2 },
696 	{ "P_IR_B", 2 },
697 	{ "P_IR_U", 2 },
698 	{ "P_IR_S", 2 },
699 	{ "P_BS_B", 2 },
700 	{ "P_BS_S", 2 },
701 	{ "P_FLAG", 2 },
702 	{ "P_RATE(Mbps)", 0 }
703 };
704 
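/* Write the shaper parameters (ir_b/ir_u/ir_s, bs_b/bs_s, flag, rate) into
 * consecutive result columns, advancing *index accordingly.
 */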
705 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
706 					  char **result, u8 *index)
707 {
708 	sprintf(result[(*index)++], "%3u", para->ir_b);
709 	sprintf(result[(*index)++], "%3u", para->ir_u);
710 	sprintf(result[(*index)++], "%3u", para->ir_s);
711 	sprintf(result[(*index)++], "%3u", para->bs_b);
712 	sprintf(result[(*index)++], "%3u", para->bs_s);
713 	sprintf(result[(*index)++], "%3u", para->flag);
714 	sprintf(result[(*index)++], "%6u", para->rate);
715 }
716 
717 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
718 				  char *buf, int len)
719 {
720 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
721 	char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
722 	u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
723 	char content[HCLGE_DBG_TM_INFO_LEN];
724 	int pos = 0;
725 	int ret;
726 
727 	for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
728 		result[i] = data_str;
729 		data_str += HCLGE_DBG_DATA_STR_LEN;
730 	}
731 
732 	hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
733 			       NULL, ARRAY_SIZE(tm_pg_items));
734 	pos += scnprintf(buf + pos, len - pos, "%s", content);
735 
736 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
737 		ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
738 		if (ret)
739 			return ret;
740 
741 		ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
742 		if (ret)
743 			return ret;
744 
745 		ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
746 		if (ret)
747 			return ret;
748 
749 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
750 					     HCLGE_OPC_TM_PG_C_SHAPPING,
751 					     &c_shaper_para);
752 		if (ret)
753 			return ret;
754 
755 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
756 					     HCLGE_OPC_TM_PG_P_SHAPPING,
757 					     &p_shaper_para);
758 		if (ret)
759 			return ret;
760 
761 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
762 				       "sp";
763 
764 		j = 0;
765 		sprintf(result[j++], "%02u", pg_id);
766 		sprintf(result[j++], "0x%02x", pri_bit_map);
767 		sprintf(result[j++], "%4s", sch_mode_str);
768 		sprintf(result[j++], "%3u", weight);
769 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
770 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
771 
772 		hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
773 				       (const char **)result,
774 				       ARRAY_SIZE(tm_pg_items));
775 		pos += scnprintf(buf + pos, len - pos, "%s", content);
776 	}
777 
778 	return 0;
779 }
780 
781 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
782 {
783 	char *data_str;
784 	int ret;
785 
786 	data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
787 			   HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
788 	if (!data_str)
789 		return -ENOMEM;
790 
791 	ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
792 
793 	kfree(data_str);
794 
795 	return ret;
796 }
797 
798 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
799 {
800 	struct hclge_tm_shaper_para shaper_para;
801 	int pos = 0;
802 	int ret;
803 
804 	ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
805 	if (ret)
806 		return ret;
807 
808 	pos += scnprintf(buf + pos, len - pos,
809 			 "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
810 	pos += scnprintf(buf + pos, len - pos,
811 			 "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
812 			 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
813 			 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
814 			 shaper_para.rate);
815 
816 	return 0;
817 }
818 
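/* Dump the back-pressure to qset mapping for a TC: each group holds a
 * 32-qset bitmap, printed as eight groups (256 qsets) per output line.
 */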
819 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
820 					 char *buf, int len)
821 {
822 	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
823 	struct hclge_bp_to_qs_map_cmd *map;
824 	struct hclge_desc desc;
825 	int pos = 0;
826 	u8 group_id;
827 	u8 grp_num;
828 	u16 i = 0;
829 	int ret;
830 
831 	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
832 		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
833 	map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
834 	for (group_id = 0; group_id < grp_num; group_id++) {
835 		hclge_cmd_setup_basic_desc(&desc,
836 					   HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
837 					   true);
838 		map->tc_id = tc_id;
839 		map->qs_group_id = group_id;
840 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
841 		if (ret) {
842 			dev_err(&hdev->pdev->dev,
843 				"failed to get bp to qset map, ret = %d\n",
844 				ret);
845 			return ret;
846 		}
847 
848 		qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
849 	}
850 
851 	pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
852 	for (group_id = 0; group_id < grp_num / 8; group_id++) {
853 		pos += scnprintf(buf + pos, len - pos,
854 			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
855 			 group_id * 256, qset_mapping[i + 7],
856 			 qset_mapping[i + 6], qset_mapping[i + 5],
857 			 qset_mapping[i + 4], qset_mapping[i + 3],
858 			 qset_mapping[i + 2], qset_mapping[i + 1],
859 			 qset_mapping[i]);
860 		i += 8;
861 	}
862 
863 	return pos;
864 }
865 
866 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
867 {
868 	u16 queue_id;
869 	u16 qset_id;
870 	u8 link_vld;
871 	int pos = 0;
872 	u8 pri_id;
873 	u8 tc_id;
874 	int ret;
875 
876 	for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
877 		ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
878 		if (ret)
879 			return ret;
880 
881 		ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
882 						&link_vld);
883 		if (ret)
884 			return ret;
885 
886 		ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
887 		if (ret)
888 			return ret;
889 
890 		pos += scnprintf(buf + pos, len - pos,
891 				 "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
892 		pos += scnprintf(buf + pos, len - pos,
893 				 "%04u        %4u       %3u      %2u\n",
894 				 queue_id, qset_id, pri_id, tc_id);
895 
896 		if (!hnae3_dev_dcb_supported(hdev))
897 			continue;
898 
899 		ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
900 						    len - pos);
901 		if (ret < 0)
902 			return ret;
903 		pos += ret;
904 
905 		pos += scnprintf(buf + pos, len - pos, "\n");
906 	}
907 
908 	return 0;
909 }
910 
911 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
912 {
913 	struct hclge_tm_nodes_cmd *nodes;
914 	struct hclge_desc desc;
915 	int pos = 0;
916 	int ret;
917 
918 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
919 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
920 	if (ret) {
921 		dev_err(&hdev->pdev->dev,
922 			"failed to dump tm nodes, ret = %d\n", ret);
923 		return ret;
924 	}
925 
926 	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
927 
928 	pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
929 	pos += scnprintf(buf + pos, len - pos, "PG      %4u      %4u\n",
930 			 nodes->pg_base_id, nodes->pg_num);
931 	pos += scnprintf(buf + pos, len - pos, "PRI     %4u      %4u\n",
932 			 nodes->pri_base_id, nodes->pri_num);
933 	pos += scnprintf(buf + pos, len - pos, "QSET    %4u      %4u\n",
934 			 le16_to_cpu(nodes->qset_base_id),
935 			 le16_to_cpu(nodes->qset_num));
936 	pos += scnprintf(buf + pos, len - pos, "QUEUE   %4u      %4u\n",
937 			 le16_to_cpu(nodes->queue_base_id),
938 			 le16_to_cpu(nodes->queue_num));
939 
940 	return 0;
941 }
942 
943 static const struct hclge_dbg_item tm_pri_items[] = {
944 	{ "ID", 4 },
945 	{ "MODE", 2 },
946 	{ "DWRR", 2 },
947 	{ "C_IR_B", 2 },
948 	{ "C_IR_U", 2 },
949 	{ "C_IR_S", 2 },
950 	{ "C_BS_B", 2 },
951 	{ "C_BS_S", 2 },
952 	{ "C_FLAG", 2 },
953 	{ "C_RATE(Mbps)", 2 },
954 	{ "P_IR_B", 2 },
955 	{ "P_IR_U", 2 },
956 	{ "P_IR_S", 2 },
957 	{ "P_BS_B", 2 },
958 	{ "P_BS_S", 2 },
959 	{ "P_FLAG", 2 },
960 	{ "P_RATE(Mbps)", 0 }
961 };
962 
963 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
964 {
965 	char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
966 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
967 	char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
968 	char content[HCLGE_DBG_TM_INFO_LEN];
969 	u8 pri_num, sch_mode, weight, i, j;
970 	int pos, ret;
971 
972 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
973 	if (ret)
974 		return ret;
975 
976 	for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
977 		result[i] = &data_str[i][0];
978 
979 	hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
980 			       NULL, ARRAY_SIZE(tm_pri_items));
981 	pos = scnprintf(buf, len, "%s", content);
982 
983 	for (i = 0; i < pri_num; i++) {
984 		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
985 		if (ret)
986 			return ret;
987 
988 		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
989 		if (ret)
990 			return ret;
991 
992 		ret = hclge_tm_get_pri_shaper(hdev, i,
993 					      HCLGE_OPC_TM_PRI_C_SHAPPING,
994 					      &c_shaper_para);
995 		if (ret)
996 			return ret;
997 
998 		ret = hclge_tm_get_pri_shaper(hdev, i,
999 					      HCLGE_OPC_TM_PRI_P_SHAPPING,
1000 					      &p_shaper_para);
1001 		if (ret)
1002 			return ret;
1003 
1004 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1005 			       "sp";
1006 
1007 		j = 0;
1008 		sprintf(result[j++], "%04u", i);
1009 		sprintf(result[j++], "%4s", sch_mode_str);
1010 		sprintf(result[j++], "%3u", weight);
1011 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1012 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1013 		hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1014 				       (const char **)result,
1015 				       ARRAY_SIZE(tm_pri_items));
1016 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1017 	}
1018 
1019 	return 0;
1020 }
1021 
1022 static const struct hclge_dbg_item tm_qset_items[] = {
1023 	{ "ID", 4 },
1024 	{ "MAP_PRI", 2 },
1025 	{ "LINK_VLD", 2 },
1026 	{ "MODE", 2 },
1027 	{ "DWRR", 2 },
1028 	{ "IR_B", 2 },
1029 	{ "IR_U", 2 },
1030 	{ "IR_S", 2 },
1031 	{ "BS_B", 2 },
1032 	{ "BS_S", 2 },
1033 	{ "FLAG", 2 },
1034 	{ "RATE(Mbps)", 0 }
1035 };
1036 
1037 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1038 {
1039 	char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1040 	char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1041 	u8 priority, link_vld, sch_mode, weight;
1042 	struct hclge_tm_shaper_para shaper_para;
1043 	char content[HCLGE_DBG_TM_INFO_LEN];
1044 	u16 qset_num, i;
1045 	int ret, pos;
1046 	u8 j;
1047 
1048 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
1049 	if (ret)
1050 		return ret;
1051 
1052 	for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1053 		result[i] = &data_str[i][0];
1054 
1055 	hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1056 			       NULL, ARRAY_SIZE(tm_qset_items));
1057 	pos = scnprintf(buf, len, "%s", content);
1058 
1059 	for (i = 0; i < qset_num; i++) {
1060 		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1061 		if (ret)
1062 			return ret;
1063 
1064 		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1065 		if (ret)
1066 			return ret;
1067 
1068 		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1069 		if (ret)
1070 			return ret;
1071 
1072 		ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1073 		if (ret)
1074 			return ret;
1075 
1076 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1077 			       "sp";
1078 
1079 		j = 0;
1080 		sprintf(result[j++], "%04u", i);
1081 		sprintf(result[j++], "%4u", priority);
1082 		sprintf(result[j++], "%4u", link_vld);
1083 		sprintf(result[j++], "%4s", sch_mode_str);
1084 		sprintf(result[j++], "%3u", weight);
1085 		hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1086 
1087 		hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1088 				       (const char **)result,
1089 				       ARRAY_SIZE(tm_qset_items));
1090 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1097 					int len)
1098 {
1099 	struct hclge_cfg_pause_param_cmd *pause_param;
1100 	struct hclge_desc desc;
1101 	int pos = 0;
1102 	int ret;
1103 
1104 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1105 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1106 	if (ret) {
1107 		dev_err(&hdev->pdev->dev,
1108 			"failed to dump qos pause, ret = %d\n", ret);
1109 		return ret;
1110 	}
1111 
1112 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1113 
1114 	pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1115 			 pause_param->pause_trans_gap);
1116 	pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1117 			 le16_to_cpu(pause_param->pause_trans_time));
1118 	return 0;
1119 }
1120 
1121 #define HCLGE_DBG_TC_MASK		0x0F
1122 
1123 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1124 				      int len)
1125 {
1126 #define HCLGE_DBG_TC_BIT_WIDTH		4
1127 
1128 	struct hclge_qos_pri_map_cmd *pri_map;
1129 	struct hclge_desc desc;
1130 	int pos = 0;
1131 	u8 *pri_tc;
1132 	u8 tc, i;
1133 	int ret;
1134 
1135 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1136 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1137 	if (ret) {
1138 		dev_err(&hdev->pdev->dev,
1139 			"failed to dump qos pri map, ret = %d\n", ret);
1140 		return ret;
1141 	}
1142 
1143 	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1144 
1145 	pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1146 			 pri_map->vlan_pri);
1147 	pos += scnprintf(buf + pos, len - pos, "PRI  TC\n");
1148 
1149 	pri_tc = (u8 *)pri_map;
1150 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1151 		tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1152 		tc &= HCLGE_DBG_TC_MASK;
1153 		pos += scnprintf(buf + pos, len - pos, "%u     %u\n", i, tc);
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
1160 				       int len)
1161 {
1162 	struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
1163 	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
1164 	u8 *req0 = (u8 *)desc[0].data;
1165 	u8 *req1 = (u8 *)desc[1].data;
1166 	u8 dscp_tc[HNAE3_MAX_DSCP];
1167 	int pos, ret;
1168 	u8 i, j;
1169 
1170 	pos = scnprintf(buf, len, "tc map mode: %s\n",
1171 			tc_map_mode_str[kinfo->tc_map_mode]);
1172 
1173 	if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
1174 		return 0;
1175 
1176 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
1177 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1178 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
1179 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
1180 	if (ret) {
1181 		dev_err(&hdev->pdev->dev,
1182 			"failed to dump qos dscp map, ret = %d\n", ret);
1183 		return ret;
1184 	}
1185 
1186 	pos += scnprintf(buf + pos, len - pos, "\nDSCP  PRIO  TC\n");
1187 
1188 	/* The low 32 DSCP settings are in BD0 and the high 32 in BD1 */
1189 	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
1190 		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
1191 		/* Each DSCP setting takes 4 bits, so each byte holds two
1192 		 * DSCP settings.
1193 		 */
1194 		dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1195 		dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1196 		dscp_tc[i] &= HCLGE_DBG_TC_MASK;
1197 		dscp_tc[j] &= HCLGE_DBG_TC_MASK;
1198 	}
1199 
1200 	for (i = 0; i < HNAE3_MAX_DSCP; i++) {
1201 		if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
1202 			continue;
1203 
1204 		pos += scnprintf(buf + pos, len - pos, " %2u    %u    %u\n",
1205 				 i, kinfo->dscp_prio[i], dscp_tc[i]);
1206 	}
1207 
1208 	return 0;
1209 }
1210 
1211 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1212 {
1213 	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1214 	struct hclge_desc desc;
1215 	int pos = 0;
1216 	int i, ret;
1217 
1218 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1219 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1220 	if (ret) {
1221 		dev_err(&hdev->pdev->dev,
1222 			"failed to dump tx buf, ret = %d\n", ret);
1223 		return ret;
1224 	}
1225 
1226 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1227 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1228 		pos += scnprintf(buf + pos, len - pos,
1229 				 "tx_packet_buf_tc_%d: 0x%x\n", i,
1230 				 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1231 
1232 	return pos;
1233 }
1234 
1235 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1236 					  int len)
1237 {
1238 	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1239 	struct hclge_desc desc;
1240 	int pos = 0;
1241 	int i, ret;
1242 
1243 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1244 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1245 	if (ret) {
1246 		dev_err(&hdev->pdev->dev,
1247 			"failed to dump rx priv buf, ret = %d\n", ret);
1248 		return ret;
1249 	}
1250 
1251 	pos += scnprintf(buf + pos, len - pos, "\n");
1252 
1253 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1254 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1255 		pos += scnprintf(buf + pos, len - pos,
1256 				 "rx_packet_buf_tc_%d: 0x%x\n", i,
1257 				 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1258 
1259 	pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1260 			 le16_to_cpu(rx_buf_cmd->shared_buf));
1261 
1262 	return pos;
1263 }
1264 
1265 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1266 					   int len)
1267 {
1268 	struct hclge_rx_com_wl *rx_com_wl;
1269 	struct hclge_desc desc;
1270 	int pos = 0;
1271 	int ret;
1272 
1273 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1274 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1275 	if (ret) {
1276 		dev_err(&hdev->pdev->dev,
1277 			"failed to dump rx common wl, ret = %d\n", ret);
1278 		return ret;
1279 	}
1280 
1281 	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1282 	pos += scnprintf(buf + pos, len - pos, "\n");
1283 	pos += scnprintf(buf + pos, len - pos,
1284 			 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1285 			 le16_to_cpu(rx_com_wl->com_wl.high),
1286 			 le16_to_cpu(rx_com_wl->com_wl.low));
1287 
1288 	return pos;
1289 }
1290 
1291 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1292 					    int len)
1293 {
1294 	struct hclge_rx_com_wl *rx_packet_cnt;
1295 	struct hclge_desc desc;
1296 	int pos = 0;
1297 	int ret;
1298 
1299 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1300 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1301 	if (ret) {
1302 		dev_err(&hdev->pdev->dev,
1303 			"failed to dump rx global pkt cnt, ret = %d\n", ret);
1304 		return ret;
1305 	}
1306 
1307 	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1308 	pos += scnprintf(buf + pos, len - pos,
1309 			 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1310 			 le16_to_cpu(rx_packet_cnt->com_wl.high),
1311 			 le16_to_cpu(rx_packet_cnt->com_wl.low));
1312 
1313 	return pos;
1314 }
1315 
1316 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1317 					     int len)
1318 {
1319 	struct hclge_rx_priv_wl_buf *rx_priv_wl;
1320 	struct hclge_desc desc[2];
1321 	int pos = 0;
1322 	int i, ret;
1323 
1324 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1325 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1326 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1327 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1328 	if (ret) {
1329 		dev_err(&hdev->pdev->dev,
1330 			"failed to dump rx priv wl buf, ret = %d\n", ret);
1331 		return ret;
1332 	}
1333 
1334 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1335 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1336 		pos += scnprintf(buf + pos, len - pos,
1337 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1338 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1339 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1340 
1341 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1342 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1343 		pos += scnprintf(buf + pos, len - pos,
1344 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1345 			 i + HCLGE_TC_NUM_ONE_DESC,
1346 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1347 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1348 
1349 	return pos;
1350 }
1351 
1352 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1353 						  char *buf, int len)
1354 {
1355 	struct hclge_rx_com_thrd *rx_com_thrd;
1356 	struct hclge_desc desc[2];
1357 	int pos = 0;
1358 	int i, ret;
1359 
1360 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1361 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1362 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1363 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1364 	if (ret) {
1365 		dev_err(&hdev->pdev->dev,
1366 			"failed to dump rx common threshold, ret = %d\n", ret);
1367 		return ret;
1368 	}
1369 
1370 	pos += scnprintf(buf + pos, len - pos, "\n");
1371 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1372 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1373 		pos += scnprintf(buf + pos, len - pos,
1374 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1375 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1376 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1377 
1378 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1379 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1380 		pos += scnprintf(buf + pos, len - pos,
1381 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1382 			 i + HCLGE_TC_NUM_ONE_DESC,
1383 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1384 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1385 
1386 	return pos;
1387 }
1388 
1389 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1390 				      int len)
1391 {
1392 	int pos = 0;
1393 	int ret;
1394 
1395 	ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1396 	if (ret < 0)
1397 		return ret;
1398 	pos += ret;
1399 
1400 	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1401 	if (ret < 0)
1402 		return ret;
1403 	pos += ret;
1404 
1405 	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1406 	if (ret < 0)
1407 		return ret;
1408 	pos += ret;
1409 
1410 	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1411 	if (ret < 0)
1412 		return ret;
1413 	pos += ret;
1414 
1415 	pos += scnprintf(buf + pos, len - pos, "\n");
1416 	if (!hnae3_dev_dcb_supported(hdev))
1417 		return 0;
1418 
1419 	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1420 	if (ret < 0)
1421 		return ret;
1422 	pos += ret;
1423 
1424 	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1425 						     len - pos);
1426 	if (ret < 0)
1427 		return ret;
1428 
1429 	return 0;
1430 }
1431 
1432 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1433 {
1434 	struct hclge_mac_ethertype_idx_rd_cmd *req0;
1435 	struct hclge_desc desc;
1436 	u32 msg_egress_port;
1437 	int pos = 0;
1438 	int ret, i;
1439 
1440 	pos += scnprintf(buf + pos, len - pos,
1441 			 "entry  mac_addr          mask  ether  ");
1442 	pos += scnprintf(buf + pos, len - pos,
1443 			 "mask  vlan  mask  i_map  i_dir  e_type  ");
1444 	pos += scnprintf(buf + pos, len - pos, "pf_id  vf_id  q_id  drop\n");
1445 
1446 	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1447 		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1448 					   true);
1449 		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1450 		req0->index = cpu_to_le16(i);
1451 
1452 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1453 		if (ret) {
1454 			dev_err(&hdev->pdev->dev,
1455 				"failed to dump manage table, ret = %d\n", ret);
1456 			return ret;
1457 		}
1458 
1459 		if (!req0->resp_code)
1460 			continue;
1461 
1462 		pos += scnprintf(buf + pos, len - pos, "%02u     %pM ",
1463 				 le16_to_cpu(req0->index), req0->mac_addr);
1464 
1465 		pos += scnprintf(buf + pos, len - pos,
1466 				 "%x     %04x   %x     %04x  ",
1467 				 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1468 				 le16_to_cpu(req0->ethter_type),
1469 				 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1470 				 le16_to_cpu(req0->vlan_tag) &
1471 				 HCLGE_DBG_MNG_VLAN_TAG);
1472 
1473 		pos += scnprintf(buf + pos, len - pos,
1474 				 "%x     %02x     %02x     ",
1475 				 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1476 				 req0->i_port_bitmap, req0->i_port_direction);
1477 
1478 		msg_egress_port = le16_to_cpu(req0->egress_port);
1479 		pos += scnprintf(buf + pos, len - pos,
1480 				 "%x       %x      %02x     %04x  %x\n",
1481 				 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1482 				 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1483 				 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1484 				 le16_to_cpu(req0->egress_queue),
1485 				 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1492 
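/* Read one flow director TCAM entry (the x or y key, selected by sel_x) at
 * location tcam_msg.loc using three chained descriptors, and format the 13
 * key words into tcam_buf.
 */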
1493 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1494 				  char *tcam_buf,
1495 				  struct hclge_dbg_tcam_msg tcam_msg)
1496 {
1497 	struct hclge_fd_tcam_config_1_cmd *req1;
1498 	struct hclge_fd_tcam_config_2_cmd *req2;
1499 	struct hclge_fd_tcam_config_3_cmd *req3;
1500 	struct hclge_desc desc[3];
1501 	int pos = 0;
1502 	int ret, i;
1503 	u32 *req;
1504 
1505 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1506 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1507 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1508 	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1509 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1510 
1511 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1512 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1513 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1514 
1515 	req1->stage  = tcam_msg.stage;
1516 	req1->xy_sel = sel_x ? 1 : 0;
1517 	req1->index  = cpu_to_le32(tcam_msg.loc);
1518 
1519 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
1520 	if (ret)
1521 		return ret;
1522 
1523 	pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1524 			 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1525 			 tcam_msg.loc);
1526 
1527 	/* tcam_data0 ~ tcam_data1 */
1528 	req = (u32 *)req1->tcam_data;
1529 	for (i = 0; i < 2; i++)
1530 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1531 				 "%08x\n", *req++);
1532 
1533 	/* tcam_data2 ~ tcam_data7 */
1534 	req = (u32 *)req2->tcam_data;
1535 	for (i = 0; i < 6; i++)
1536 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1537 				 "%08x\n", *req++);
1538 
1539 	/* tcam_data8 ~ tcam_data12 */
1540 	req = (u32 *)req3->tcam_data;
1541 	for (i = 0; i < 5; i++)
1542 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1543 				 "%08x\n", *req++);
1544 
1545 	return ret;
1546 }
1547 
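/* Snapshot the location of every configured flow director rule while
 * holding fd_rule_lock; the count must be non-zero and match
 * hclge_fd_rule_num.
 */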
1548 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1549 {
1550 	struct hclge_fd_rule *rule;
1551 	struct hlist_node *node;
1552 	int cnt = 0;
1553 
1554 	spin_lock_bh(&hdev->fd_rule_lock);
1555 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1556 		rule_locs[cnt] = rule->location;
1557 		cnt++;
1558 	}
1559 	spin_unlock_bh(&hdev->fd_rule_lock);
1560 
1561 	if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1562 		return -EINVAL;
1563 
1564 	return cnt;
1565 }
1566 
1567 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1568 {
1569 	u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1570 	struct hclge_dbg_tcam_msg tcam_msg;
1571 	int i, ret, rule_cnt;
1572 	u16 *rule_locs;
1573 	char *tcam_buf;
1574 	int pos = 0;
1575 
1576 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1577 		dev_err(&hdev->pdev->dev,
1578 			"Only FD-supported dev supports dump fd tcam\n");
1579 		return -EOPNOTSUPP;
1580 	}
1581 
1582 	if (!hdev->hclge_fd_rule_num || !rule_num)
1583 		return 0;
1584 
1585 	rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1586 	if (!rule_locs)
1587 		return -ENOMEM;
1588 
1589 	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1590 	if (!tcam_buf) {
1591 		kfree(rule_locs);
1592 		return -ENOMEM;
1593 	}
1594 
1595 	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1596 	if (rule_cnt < 0) {
1597 		ret = rule_cnt;
1598 		dev_err(&hdev->pdev->dev,
1599 			"failed to get rule number, ret = %d\n", ret);
1600 		goto out;
1601 	}
1602 
1603 	ret = 0;
1604 	for (i = 0; i < rule_cnt; i++) {
1605 		tcam_msg.stage = HCLGE_FD_STAGE_1;
1606 		tcam_msg.loc = rule_locs[i];
1607 
1608 		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1609 		if (ret) {
1610 			dev_err(&hdev->pdev->dev,
1611 				"failed to get fd tcam key x, ret = %d\n", ret);
1612 			goto out;
1613 		}
1614 
1615 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1616 
1617 		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1618 		if (ret) {
1619 			dev_err(&hdev->pdev->dev,
1620 				"failed to get fd tcam key y, ret = %d\n", ret);
1621 			goto out;
1622 		}
1623 
1624 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1625 	}
1626 
1627 out:
1628 	kfree(tcam_buf);
1629 	kfree(rule_locs);
1630 	return ret;
1631 }
1632 
1633 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1634 {
1635 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1636 	struct hclge_fd_ad_cnt_read_cmd *req;
1637 	char str_id[HCLGE_DBG_ID_LEN];
1638 	struct hclge_desc desc;
1639 	int pos = 0;
1640 	int ret;
1641 	u64 cnt;
1642 	u8 i;
1643 
1644 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
1645 		return -EOPNOTSUPP;
1646 
1647 	pos += scnprintf(buf + pos, len - pos,
1648 			 "func_id\thit_times\n");
1649 
1650 	for (i = 0; i < func_num; i++) {
1651 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1652 		req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1653 		req->index = cpu_to_le16(i);
1654 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1655 		if (ret) {
1656 			dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1657 				ret);
1658 			return ret;
1659 		}
1660 		cnt = le64_to_cpu(req->cnt);
1661 		hclge_dbg_get_func_id_str(str_id, i);
1662 		pos += scnprintf(buf + pos, len - pos,
1663 				 "%s\t%llu\n", str_id, cnt);
1664 	}
1665 
1666 	return 0;
1667 }
1668 
1669 static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
1670 	{HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
1671 	{HCLGE_MISC_RESET_STS_REG,   "reset interrupt source"},
1672 	{HCLGE_MISC_VECTOR_INT_STS,  "reset interrupt status"},
1673 	{HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
1674 	{HCLGE_GLOBAL_RESET_REG,  "hardware reset status"},
1675 	{HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
1676 	{HCLGE_FUN_RST_ING, "function reset status"}
1677 };
1678 
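/* Dump the reset counters kept in hdev->rst_stats along with the raw value
 * of each status register listed in hclge_dbg_rst_info.
 */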
1679 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1680 {
1681 	u32 i, offset;
1682 	int pos = 0;
1683 
1684 	pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1685 			 hdev->rst_stats.pf_rst_cnt);
1686 	pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1687 			 hdev->rst_stats.flr_rst_cnt);
1688 	pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1689 			 hdev->rst_stats.global_rst_cnt);
1690 	pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1691 			 hdev->rst_stats.imp_rst_cnt);
1692 	pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1693 			 hdev->rst_stats.reset_done_cnt);
1694 	pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1695 			 hdev->rst_stats.hw_reset_done_cnt);
1696 	pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1697 			 hdev->rst_stats.reset_cnt);
1698 	pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1699 			 hdev->rst_stats.reset_fail_cnt);
1700 
1701 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
1702 		offset = hclge_dbg_rst_info[i].offset;
1703 		pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
1704 				 hclge_dbg_rst_info[i].message,
1705 				 hclge_read_dev(&hdev->hw, offset));
1706 	}
1707 
1708 	pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1709 			 hdev->state);
1710 
1711 	return 0;
1712 }
1713 
1714 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1715 {
1716 	unsigned long rem_nsec;
1717 	int pos = 0;
1718 	u64 lc;
1719 
1720 	lc = local_clock();
1721 	rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1722 
1723 	pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1724 			 (unsigned long)lc, rem_nsec / 1000);
1725 	pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1726 			 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1727 	pos += scnprintf(buf + pos, len - pos,
1728 			 "last_service_task_processed: %lu(jiffies)\n",
1729 			 hdev->last_serv_processed);
1730 	pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1731 			 hdev->serv_processed_cnt);
1732 
1733 	return 0;
1734 }
1735 
1736 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1737 {
1738 	int pos = 0;
1739 
1740 	pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1741 			 hdev->num_nic_msi);
1742 	pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1743 			 hdev->num_roce_msi);
1744 	pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1745 			 hdev->num_msi_used);
1746 	pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1747 			 hdev->num_msi_left);
1748 
1749 	return 0;
1750 }
1751 
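/* Print the IMP statistics descriptors as "offset | data" lines, two
 * 32-bit words per line.
 */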
1752 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1753 					  char *buf, int len, u32 bd_num)
1754 {
1755 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1756 
1757 	struct hclge_desc *desc_index = desc_src;
1758 	u32 offset = 0;
1759 	int pos = 0;
1760 	u32 i, j;
1761 
1762 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1763 
1764 	for (i = 0; i < bd_num; i++) {
1765 		j = 0;
1766 		while (j < HCLGE_DESC_DATA_LEN - 1) {
1767 			pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1768 					 offset);
1769 			pos += scnprintf(buf + pos, len - pos, "0x%08x  ",
1770 					 le32_to_cpu(desc_index->data[j++]));
1771 			pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1772 					 le32_to_cpu(desc_index->data[j++]));
1773 			offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1774 		}
1775 		desc_index++;
1776 	}
1777 }
1778 
1779 static int
1780 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1781 {
1782 	struct hclge_get_imp_bd_cmd *req;
1783 	struct hclge_desc *desc_src;
1784 	struct hclge_desc desc;
1785 	u32 bd_num;
1786 	int ret;
1787 
1788 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1789 
1790 	req = (struct hclge_get_imp_bd_cmd *)desc.data;
1791 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1792 	if (ret) {
1793 		dev_err(&hdev->pdev->dev,
1794 			"failed to get imp statistics bd number, ret = %d\n",
1795 			ret);
1796 		return ret;
1797 	}
1798 
1799 	bd_num = le32_to_cpu(req->bd_num);
1800 	if (!bd_num) {
1801 		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1802 		return -EINVAL;
1803 	}
1804 
1805 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1806 	if (!desc_src)
1807 		return -ENOMEM;
1808 
	ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
				 HCLGE_OPC_IMP_STATS_INFO);
1811 	if (ret) {
1812 		kfree(desc_src);
1813 		dev_err(&hdev->pdev->dev,
1814 			"failed to get imp statistics, ret = %d\n", ret);
1815 		return ret;
1816 	}
1817 
1818 	hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1819 
1820 	kfree(desc_src);
1821 
1822 	return 0;
1823 }
1824 
1825 #define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
1826 #define HCLGE_MAX_NCL_CONFIG_LENGTH	16384
1827 
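/* hclge_ncl_config_data_print: print one batch of NCL config data as
 * "offset | data" lines, skipping the first data word of the first
 * descriptor, and update the remaining length in @index.
 */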
1828 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1829 					char *buf, int len, int *pos)
1830 {
1831 #define HCLGE_CMD_DATA_NUM		6
1832 
1833 	int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1834 	int i, j;
1835 
1836 	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1837 		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1838 			if (i == 0 && j == 0)
1839 				continue;
1840 
1841 			*pos += scnprintf(buf + *pos, len - *pos,
1842 					  "0x%04x | 0x%08x\n", offset,
1843 					  le32_to_cpu(desc[i].data[j]));
1844 
1845 			offset += sizeof(u32);
1846 			*index -= sizeof(u32);
1847 
1848 			if (*index <= 0)
1849 				return;
1850 		}
1851 	}
1852 }
1853 
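/* hclge_dbg_dump_ncl_config: dump the whole NCL config area. Each query
 * reads at most HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes; data0 of the
 * command holds the read offset in its low 16 bits and the read length in
 * its high 16 bits.
 */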
1854 static int
1855 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1856 {
1857 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
1858 
1859 	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1860 	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1861 	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1862 	int pos = 0;
1863 	u32 data0;
1864 	int ret;
1865 
1866 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1867 
1868 	while (index > 0) {
1869 		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1870 		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1871 			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1872 		else
1873 			data0 |= (u32)index << 16;
1874 		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1875 					 HCLGE_OPC_QUERY_NCL_CONFIG);
1876 		if (ret)
1877 			return ret;
1878 
1879 		hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
1880 	}
1881 
1882 	return 0;
1883 }
1884 
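/* hclge_dbg_dump_loopback: dump the current app, serdes and phy loopback
 * state by querying the MAC mode and common loopback configuration.
 */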
1885 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1886 {
1887 	struct phy_device *phydev = hdev->hw.mac.phydev;
1888 	struct hclge_config_mac_mode_cmd *req_app;
1889 	struct hclge_common_lb_cmd *req_common;
1890 	struct hclge_desc desc;
1891 	u8 loopback_en;
1892 	int pos = 0;
1893 	int ret;
1894 
1895 	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1896 	req_common = (struct hclge_common_lb_cmd *)desc.data;
1897 
1898 	pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1899 			 hdev->hw.mac.mac_id);
1900 
1901 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1902 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1903 	if (ret) {
1904 		dev_err(&hdev->pdev->dev,
1905 			"failed to dump app loopback status, ret = %d\n", ret);
1906 		return ret;
1907 	}
1908 
1909 	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1910 				    HCLGE_MAC_APP_LP_B);
1911 	pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1912 			 state_str[loopback_en]);
1913 
1914 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1915 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1916 	if (ret) {
1917 		dev_err(&hdev->pdev->dev,
1918 			"failed to dump common loopback status, ret = %d\n",
1919 			ret);
1920 		return ret;
1921 	}
1922 
1923 	loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
1924 	pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1925 			 state_str[loopback_en]);
1926 
1927 	loopback_en = req_common->enable &
1928 			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1929 	pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1930 			 state_str[loopback_en]);
1931 
1932 	if (phydev) {
1933 		loopback_en = phydev->loopback_enabled;
1934 		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1935 				 state_str[loopback_en]);
1936 	} else if (hnae3_dev_phy_imp_supported(hdev)) {
		/* normalize to 0/1 so it is a valid state_str[] index */
		loopback_en = req_common->enable &
			      HCLGE_CMD_GE_PHY_INNER_LOOP_B ? 1 : 0;
1939 		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1940 				 state_str[loopback_en]);
1941 	}
1942 
1943 	return 0;
1944 }
1945 
/* hclge_dbg_dump_mac_tnl_status: print messages about mac tnl interrupts
 * @hdev: pointer to struct hclge_dev
 * @buf: output buffer
 * @len: length of the output buffer
 */
1949 static int
1950 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1951 {
1952 	struct hclge_mac_tnl_stats stats;
1953 	unsigned long rem_nsec;
1954 	int pos = 0;
1955 
1956 	pos += scnprintf(buf + pos, len - pos,
1957 			 "Recently generated mac tnl interruption:\n");
1958 
1959 	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1960 		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1961 
1962 		pos += scnprintf(buf + pos, len - pos,
1963 				 "[%07lu.%03lu] status = 0x%x\n",
1964 				 (unsigned long)stats.time, rem_nsec / 1000,
1965 				 stats.status);
1966 	}
1967 
1968 	return 0;
1969 }
1970 
1972 static const struct hclge_dbg_item mac_list_items[] = {
1973 	{ "FUNC_ID", 2 },
1974 	{ "MAC_ADDR", 12 },
1975 	{ "STATE", 2 },
1976 };
1977 
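/* hclge_dbg_dump_mac_list: dump the unicast or multicast MAC list of every
 * vport as a FUNC_ID/MAC_ADDR/STATE table, holding the per-vport
 * mac_list_lock while walking each list.
 */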
1978 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
1979 				    bool is_unicast)
1980 {
1981 	char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
1982 	char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
1983 	char *result[ARRAY_SIZE(mac_list_items)];
1984 	struct hclge_mac_node *mac_node, *tmp;
1985 	struct hclge_vport *vport;
1986 	struct list_head *list;
1987 	u32 func_id;
1988 	int pos = 0;
1989 	int i;
1990 
1991 	for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
1992 		result[i] = &data_str[i][0];
1993 
1994 	pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
1995 			 is_unicast ? "UC" : "MC");
1996 	hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
1997 			       NULL, ARRAY_SIZE(mac_list_items));
1998 	pos += scnprintf(buf + pos, len - pos, "%s", content);
1999 
2000 	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
2001 		vport = &hdev->vport[func_id];
2002 		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
2003 		spin_lock_bh(&vport->mac_list_lock);
2004 		list_for_each_entry_safe(mac_node, tmp, list, node) {
2005 			i = 0;
2006 			result[i++] = hclge_dbg_get_func_id_str(str_id,
2007 								func_id);
2008 			sprintf(result[i++], "%pM", mac_node->mac_addr);
2009 			sprintf(result[i++], "%5s",
2010 				hclge_mac_state_str[mac_node->state]);
2011 			hclge_dbg_fill_content(content, sizeof(content),
2012 					       mac_list_items,
2013 					       (const char **)result,
2014 					       ARRAY_SIZE(mac_list_items));
2015 			pos += scnprintf(buf + pos, len - pos, "%s", content);
2016 		}
2017 		spin_unlock_bh(&vport->mac_list_lock);
2018 	}
2019 }
2020 
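/* hclge_dbg_dump_umv_info: dump the unicast MAC VLAN space usage: the
 * private and shared UMV sizes and the entries used by each function.
 */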
2021 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
2022 {
2023 	u8 func_num = pci_num_vf(hdev->pdev) + 1;
2024 	struct hclge_vport *vport;
2025 	int pos = 0;
2026 	u8 i;
2027 
	pos += scnprintf(buf + pos, len - pos, "num_alloc_vport   : %u\n",
			 hdev->num_alloc_vport);
2030 	pos += scnprintf(buf + pos, len - pos, "max_umv_size     : %u\n",
2031 			 hdev->max_umv_size);
2032 	pos += scnprintf(buf + pos, len - pos, "wanted_umv_size  : %u\n",
2033 			 hdev->wanted_umv_size);
2034 	pos += scnprintf(buf + pos, len - pos, "priv_umv_size    : %u\n",
2035 			 hdev->priv_umv_size);
2036 
2037 	mutex_lock(&hdev->vport_lock);
2038 	pos += scnprintf(buf + pos, len - pos, "share_umv_size   : %u\n",
2039 			 hdev->share_umv_size);
2040 	for (i = 0; i < func_num; i++) {
2041 		vport = &hdev->vport[i];
2042 		pos += scnprintf(buf + pos, len - pos,
2043 				 "vport(%u) used_umv_num : %u\n",
2044 				 i, vport->used_umv_num);
2045 	}
2046 	mutex_unlock(&hdev->vport_lock);
2047 
2048 	pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num  : %u\n",
2049 			 hdev->used_mc_mac_num);
2050 
2051 	return 0;
2052 }
2053 
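/* hclge_get_vlan_rx_offload_cfg: read the Rx VLAN offload configuration
 * (strip/drop/priority-only tag bits) of function @vf_id.
 */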
2054 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2055 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2056 {
2057 	struct hclge_vport_vtag_rx_cfg_cmd *req;
2058 	struct hclge_desc desc;
2059 	u16 bmap_index;
2060 	u8 rx_cfg;
2061 	int ret;
2062 
2063 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2064 
2065 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
2066 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2067 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2068 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2069 
2070 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2071 	if (ret) {
2072 		dev_err(&hdev->pdev->dev,
2073 			"failed to get vport%u rxvlan cfg, ret = %d\n",
2074 			vf_id, ret);
2075 		return ret;
2076 	}
2077 
2078 	rx_cfg = req->vport_vlan_cfg;
2079 	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2080 	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2081 	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2082 	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2083 	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2084 	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2085 
2086 	return 0;
2087 }
2088 
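/* hclge_get_vlan_tx_offload_cfg: read the Tx VLAN offload configuration
 * (default PVID and accept/insert/shift tag bits) of function @vf_id.
 */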
2089 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2090 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2091 {
2092 	struct hclge_vport_vtag_tx_cfg_cmd *req;
2093 	struct hclge_desc desc;
2094 	u16 bmap_index;
2095 	u8 tx_cfg;
2096 	int ret;
2097 
2098 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2099 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2100 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2101 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2102 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2103 
2104 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2105 	if (ret) {
2106 		dev_err(&hdev->pdev->dev,
2107 			"failed to get vport%u txvlan cfg, ret = %d\n",
2108 			vf_id, ret);
2109 		return ret;
2110 	}
2111 
2112 	tx_cfg = req->vport_vlan_cfg;
2113 	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2114 
2115 	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2116 	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2117 	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2118 	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2119 	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2120 	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2121 	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2122 
2123 	return 0;
2124 }
2125 
2126 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2127 					    u8 vlan_type, u8 vf_id,
2128 					    struct hclge_desc *desc)
2129 {
2130 	struct hclge_vlan_filter_ctrl_cmd *req;
2131 	int ret;
2132 
2133 	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2134 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2135 	req->vlan_type = vlan_type;
2136 	req->vf_id = vf_id;
2137 
2138 	ret = hclge_cmd_send(&hdev->hw, desc, 1);
2139 	if (ret)
2140 		dev_err(&hdev->pdev->dev,
2141 			"failed to get vport%u vlan filter config, ret = %d.\n",
2142 			vf_id, ret);
2143 
2144 	return ret;
2145 }
2146 
2147 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2148 				       u8 vf_id, u8 *vlan_fe)
2149 {
2150 	struct hclge_vlan_filter_ctrl_cmd *req;
2151 	struct hclge_desc desc;
2152 	int ret;
2153 
2154 	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2155 	if (ret)
2156 		return ret;
2157 
2158 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2159 	*vlan_fe = req->vlan_fe;
2160 
2161 	return 0;
2162 }
2163 
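/* hclge_get_port_vlan_filter_bypass_state: read the ingress port VLAN
 * filter bypass state of function @vf_id, or return 0 directly when the
 * device does not support port VLAN bypass.
 */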
2164 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2165 						   u8 vf_id, u8 *bypass_en)
2166 {
2167 	struct hclge_port_vlan_filter_bypass_cmd *req;
2168 	struct hclge_desc desc;
2169 	int ret;
2170 
2171 	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2172 		return 0;
2173 
2174 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2175 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2176 	req->vf_id = vf_id;
2177 
2178 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2179 	if (ret) {
2180 		dev_err(&hdev->pdev->dev,
2181 			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2182 			vf_id, ret);
2183 		return ret;
2184 	}
2185 
2186 	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2187 
2188 	return 0;
2189 }
2190 
2191 static const struct hclge_dbg_item vlan_filter_items[] = {
2192 	{ "FUNC_ID", 2 },
2193 	{ "I_VF_VLAN_FILTER", 2 },
2194 	{ "E_VF_VLAN_FILTER", 2 },
2195 	{ "PORT_VLAN_FILTER_BYPASS", 0 }
2196 };
2197 
2198 static const struct hclge_dbg_item vlan_offload_items[] = {
2199 	{ "FUNC_ID", 2 },
2200 	{ "PVID", 4 },
2201 	{ "ACCEPT_TAG1", 2 },
2202 	{ "ACCEPT_TAG2", 2 },
2203 	{ "ACCEPT_UNTAG1", 2 },
2204 	{ "ACCEPT_UNTAG2", 2 },
2205 	{ "INSERT_TAG1", 2 },
2206 	{ "INSERT_TAG2", 2 },
2207 	{ "SHIFT_TAG", 2 },
2208 	{ "STRIP_TAG1", 2 },
2209 	{ "STRIP_TAG2", 2 },
2210 	{ "DROP_TAG1", 2 },
2211 	{ "DROP_TAG2", 2 },
2212 	{ "PRI_ONLY_TAG1", 2 },
2213 	{ "PRI_ONLY_TAG2", 0 }
2214 };
2215 
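/* hclge_dbg_dump_vlan_filter_config: dump the port level VLAN filter state
 * and a per-function table of the VF VLAN filter and port VLAN filter
 * bypass state.
 */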
2216 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2217 					     int len, int *pos)
2218 {
2219 	char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2220 	const char *result[ARRAY_SIZE(vlan_filter_items)];
2221 	u8 i, j, vlan_fe, bypass, ingress, egress;
2222 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2223 	int ret;
2224 
2225 	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2226 					  &vlan_fe);
2227 	if (ret)
2228 		return ret;
2229 	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2230 	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2231 
	*pos += scnprintf(buf + *pos, len - *pos, "I_PORT_VLAN_FILTER: %s\n",
			  state_str[ingress]);
2234 	*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2235 			  state_str[egress]);
2236 
2237 	hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2238 			       NULL, ARRAY_SIZE(vlan_filter_items));
2239 	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2240 
2241 	for (i = 0; i < func_num; i++) {
2242 		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2243 						  &vlan_fe);
2244 		if (ret)
2245 			return ret;
2246 
2247 		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2248 		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2249 		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2250 		if (ret)
2251 			return ret;
2252 		j = 0;
2253 		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2254 		result[j++] = state_str[ingress];
2255 		result[j++] = state_str[egress];
2256 		result[j++] =
2257 			test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2258 				 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2259 		hclge_dbg_fill_content(content, sizeof(content),
2260 				       vlan_filter_items, result,
2261 				       ARRAY_SIZE(vlan_filter_items));
2262 		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2263 	}
2264 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
2265 
2266 	return 0;
2267 }
2268 
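/* hclge_dbg_dump_vlan_offload_config: dump the Tx/Rx VLAN offload
 * configuration of every function, one table row per function.
 */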
2269 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2270 					      int len, int *pos)
2271 {
2272 	char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2273 	const char *result[ARRAY_SIZE(vlan_offload_items)];
2274 	char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2275 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2276 	struct hclge_dbg_vlan_cfg vlan_cfg;
2277 	int ret;
2278 	u8 i, j;
2279 
2280 	hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2281 			       NULL, ARRAY_SIZE(vlan_offload_items));
2282 	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2283 
2284 	for (i = 0; i < func_num; i++) {
2285 		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2286 		if (ret)
2287 			return ret;
2288 
2289 		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2290 		if (ret)
2291 			return ret;
2292 
2293 		sprintf(str_pvid, "%u", vlan_cfg.pvid);
2294 		j = 0;
2295 		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2296 		result[j++] = str_pvid;
2297 		result[j++] = state_str[vlan_cfg.accept_tag1];
2298 		result[j++] = state_str[vlan_cfg.accept_tag2];
2299 		result[j++] = state_str[vlan_cfg.accept_untag1];
2300 		result[j++] = state_str[vlan_cfg.accept_untag2];
2301 		result[j++] = state_str[vlan_cfg.insert_tag1];
2302 		result[j++] = state_str[vlan_cfg.insert_tag2];
2303 		result[j++] = state_str[vlan_cfg.shift_tag];
2304 		result[j++] = state_str[vlan_cfg.strip_tag1];
2305 		result[j++] = state_str[vlan_cfg.strip_tag2];
2306 		result[j++] = state_str[vlan_cfg.drop_tag1];
2307 		result[j++] = state_str[vlan_cfg.drop_tag2];
2308 		result[j++] = state_str[vlan_cfg.pri_only1];
2309 		result[j++] = state_str[vlan_cfg.pri_only2];
2310 
2311 		hclge_dbg_fill_content(content, sizeof(content),
2312 				       vlan_offload_items, result,
2313 				       ARRAY_SIZE(vlan_offload_items));
2314 		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2315 	}
2316 
2317 	return 0;
2318 }
2319 
2320 static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
2321 				      int len)
2322 {
2323 	int pos = 0;
2324 	int ret;
2325 
2326 	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
2327 	if (ret)
2328 		return ret;
2329 
2330 	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
2331 }
2332 
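/* hclge_dbg_dump_ptp_info: dump the PTP state of this PF: enable flags,
 * Tx/Rx timestamp counters and the software/hardware PTP configuration.
 */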
2333 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2334 {
2335 	struct hclge_ptp *ptp = hdev->ptp;
2336 	u32 sw_cfg = ptp->ptp_cfg;
2337 	unsigned int tx_start;
2338 	unsigned int last_rx;
2339 	int pos = 0;
2340 	u32 hw_cfg;
2341 	int ret;
2342 
2343 	pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2344 			 ptp->info.name);
2345 	pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2346 			 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2347 			 "yes" : "no");
2348 	pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2349 			 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2350 			 "yes" : "no");
2351 	pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2352 			 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2353 			 "yes" : "no");
2354 
2355 	last_rx = jiffies_to_msecs(ptp->last_rx);
2356 	pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
2357 			 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2358 	pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2359 
2360 	tx_start = jiffies_to_msecs(ptp->tx_start);
2361 	pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
2362 			 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2363 	pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2364 	pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2365 			 ptp->tx_skipped);
2366 	pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2367 			 ptp->tx_timeout);
2368 	pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2369 			 ptp->last_tx_seqid);
2370 
2371 	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2372 	if (ret)
2373 		return ret;
2374 
2375 	pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2376 			 sw_cfg, hw_cfg);
2377 
2378 	pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2379 			 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2380 
2381 	return 0;
2382 }
2383 
2384 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2385 {
2386 	hclge_dbg_dump_mac_list(hdev, buf, len, true);
2387 
2388 	return 0;
2389 }
2390 
2391 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2392 {
2393 	hclge_dbg_dump_mac_list(hdev, buf, len, false);
2394 
2395 	return 0;
2396 }
2397 
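/* map each debugfs command to its dump handler: most entries provide a
 * dbg_dump callback, while the DFX register dumps use dbg_dump_reg, which
 * also takes the command id.
 */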
2398 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2399 	{
2400 		.cmd = HNAE3_DBG_CMD_TM_NODES,
2401 		.dbg_dump = hclge_dbg_dump_tm_nodes,
2402 	},
2403 	{
2404 		.cmd = HNAE3_DBG_CMD_TM_PRI,
2405 		.dbg_dump = hclge_dbg_dump_tm_pri,
2406 	},
2407 	{
2408 		.cmd = HNAE3_DBG_CMD_TM_QSET,
2409 		.dbg_dump = hclge_dbg_dump_tm_qset,
2410 	},
2411 	{
2412 		.cmd = HNAE3_DBG_CMD_TM_MAP,
2413 		.dbg_dump = hclge_dbg_dump_tm_map,
2414 	},
2415 	{
2416 		.cmd = HNAE3_DBG_CMD_TM_PG,
2417 		.dbg_dump = hclge_dbg_dump_tm_pg,
2418 	},
2419 	{
2420 		.cmd = HNAE3_DBG_CMD_TM_PORT,
2421 		.dbg_dump = hclge_dbg_dump_tm_port,
2422 	},
2423 	{
2424 		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2425 		.dbg_dump = hclge_dbg_dump_tc,
2426 	},
2427 	{
2428 		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2429 		.dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2430 	},
2431 	{
2432 		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2433 		.dbg_dump = hclge_dbg_dump_qos_pri_map,
2434 	},
2435 	{
2436 		.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
2437 		.dbg_dump = hclge_dbg_dump_qos_dscp_map,
2438 	},
2439 	{
2440 		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2441 		.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2442 	},
2443 	{
2444 		.cmd = HNAE3_DBG_CMD_MAC_UC,
2445 		.dbg_dump = hclge_dbg_dump_mac_uc,
2446 	},
2447 	{
2448 		.cmd = HNAE3_DBG_CMD_MAC_MC,
2449 		.dbg_dump = hclge_dbg_dump_mac_mc,
2450 	},
2451 	{
2452 		.cmd = HNAE3_DBG_CMD_MNG_TBL,
2453 		.dbg_dump = hclge_dbg_dump_mng_table,
2454 	},
2455 	{
2456 		.cmd = HNAE3_DBG_CMD_LOOPBACK,
2457 		.dbg_dump = hclge_dbg_dump_loopback,
2458 	},
2459 	{
2460 		.cmd = HNAE3_DBG_CMD_PTP_INFO,
2461 		.dbg_dump = hclge_dbg_dump_ptp_info,
2462 	},
2463 	{
2464 		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2465 		.dbg_dump = hclge_dbg_dump_interrupt,
2466 	},
2467 	{
2468 		.cmd = HNAE3_DBG_CMD_RESET_INFO,
2469 		.dbg_dump = hclge_dbg_dump_rst_info,
2470 	},
2471 	{
2472 		.cmd = HNAE3_DBG_CMD_IMP_INFO,
2473 		.dbg_dump = hclge_dbg_get_imp_stats_info,
2474 	},
2475 	{
2476 		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2477 		.dbg_dump = hclge_dbg_dump_ncl_config,
2478 	},
2479 	{
2480 		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2481 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2482 	},
2483 	{
2484 		.cmd = HNAE3_DBG_CMD_REG_SSU,
2485 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2486 	},
2487 	{
2488 		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2489 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2490 	},
2491 	{
2492 		.cmd = HNAE3_DBG_CMD_REG_RPU,
2493 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2494 	},
2495 	{
2496 		.cmd = HNAE3_DBG_CMD_REG_NCSI,
2497 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2498 	},
2499 	{
2500 		.cmd = HNAE3_DBG_CMD_REG_RTC,
2501 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2502 	},
2503 	{
2504 		.cmd = HNAE3_DBG_CMD_REG_PPP,
2505 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2506 	},
2507 	{
2508 		.cmd = HNAE3_DBG_CMD_REG_RCB,
2509 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2510 	},
2511 	{
2512 		.cmd = HNAE3_DBG_CMD_REG_TQP,
2513 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2514 	},
2515 	{
2516 		.cmd = HNAE3_DBG_CMD_REG_MAC,
2517 		.dbg_dump = hclge_dbg_dump_mac,
2518 	},
2519 	{
2520 		.cmd = HNAE3_DBG_CMD_REG_DCB,
2521 		.dbg_dump = hclge_dbg_dump_dcb,
2522 	},
2523 	{
2524 		.cmd = HNAE3_DBG_CMD_FD_TCAM,
2525 		.dbg_dump = hclge_dbg_dump_fd_tcam,
2526 	},
2527 	{
2528 		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2529 		.dbg_dump = hclge_dbg_dump_mac_tnl_status,
2530 	},
2531 	{
2532 		.cmd = HNAE3_DBG_CMD_SERV_INFO,
2533 		.dbg_dump = hclge_dbg_dump_serv_info,
2534 	},
2535 	{
2536 		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2537 		.dbg_dump = hclge_dbg_dump_vlan_config,
2538 	},
2539 	{
2540 		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
2541 		.dbg_dump = hclge_dbg_dump_fd_counter,
2542 	},
2543 	{
2544 		.cmd = HNAE3_DBG_CMD_UMV_INFO,
2545 		.dbg_dump = hclge_dbg_dump_umv_info,
2546 	},
2547 };
2548 
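/* hclge_dbg_read_cmd: look up the handler of @cmd in hclge_dbg_cmd_func[]
 * and use it to fill @buf with at most @len bytes of debug information.
 */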
2549 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2550 		       char *buf, int len)
2551 {
2552 	struct hclge_vport *vport = hclge_get_vport(handle);
2553 	const struct hclge_dbg_func *cmd_func;
2554 	struct hclge_dev *hdev = vport->back;
2555 	u32 i;
2556 
2557 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2558 		if (cmd == hclge_dbg_cmd_func[i].cmd) {
2559 			cmd_func = &hclge_dbg_cmd_func[i];
2560 			if (cmd_func->dbg_dump)
2561 				return cmd_func->dbg_dump(hdev, buf, len);
2562 			else
2563 				return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2564 							      len);
2565 		}
2566 	}
2567 
2568 	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2569 	return -EINVAL;
2570 }
2571