1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3 
4 #include <linux/device.h>
5 
6 #include "hclge_debugfs.h"
7 #include "hclge_err.h"
8 #include "hclge_main.h"
9 #include "hclge_tm.h"
10 #include "hnae3.h"
11 
12 static const char * const state_str[] = { "off", "on" };
13 static const char * const hclge_mac_state_str[] = {
14 	"TO_ADD", "TO_DEL", "ACTIVE"
15 };
16 
17 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
18 	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
19 	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
20 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
21 		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
22 		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
23 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
24 	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
25 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
26 		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
27 		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
28 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
29 	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
30 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
31 		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
32 		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
33 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
34 	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
35 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
36 		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
37 		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
38 	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
39 	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
40 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
41 		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
42 		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
43 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
44 	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
45 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
46 		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
47 		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
48 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
49 	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
50 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
51 		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
52 		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
53 	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
54 	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
55 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
56 		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
57 		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
58 	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
59 	  .dfx_msg = &hclge_dbg_rtc_reg[0],
60 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
61 		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
62 		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
63 	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
64 	  .dfx_msg = &hclge_dbg_ppp_reg[0],
65 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
66 		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
67 		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
68 	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
69 	  .dfx_msg = &hclge_dbg_rcb_reg[0],
70 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
71 		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
72 		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
73 	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
74 	  .dfx_msg = &hclge_dbg_tqp_reg[0],
75 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
76 		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
77 		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
78 };
79 
80 /* Make sure that len(name) + interval >= maxlen(item data) + 2.
81  * For example, if name = "pkt_num" (len: 7) and the item data is a u32
82  * printed with "%u" (max len: 10), then the interval must be at least 5.
83  */
84 static void hclge_dbg_fill_content(char *content, u16 len,
85 				   const struct hclge_dbg_item *items,
86 				   const char **result, u16 size)
87 {
88 	char *pos = content;
89 	u16 i;
90 
91 	memset(content, ' ', len);
92 	for (i = 0; i < size; i++) {
93 		if (result)
94 			memcpy(pos, result[i], strlen(result[i]));
95 		else
96 			memcpy(pos, items[i].name, strlen(items[i].name));
97 		pos += strlen(items[i].name) + items[i].interval;
98 	}
99 	*pos++ = '\n';
100 	*pos++ = '\0';
101 }
102 
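/* Build a function id string: id 0 is the PF, id N (N >= 1) is printed as
 * "vf<N - 1>".
 */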
103 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
104 {
105 	if (id)
106 		sprintf(buf, "vf%u", id - 1U);
107 	else
108 		sprintf(buf, "pf");
109 
110 	return buf;
111 }
112 
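/* Query the DFX BD number table from the firmware and return the entry at
 * @offset in @bd_num. A BD count of zero is reported as an error.
 */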
113 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
114 				    u32 *bd_num)
115 {
116 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
117 	int entries_per_desc;
118 	int index;
119 	int ret;
120 
121 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
122 	if (ret) {
123 		dev_err(&hdev->pdev->dev,
124 			"failed to get dfx bd_num, offset = %d, ret = %d\n",
125 			offset, ret);
126 		return ret;
127 	}
128 
129 	entries_per_desc = ARRAY_SIZE(desc[0].data);
130 	index = offset % entries_per_desc;
131 
132 	*bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
133 	if (!(*bd_num)) {
134 		dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
135 		return -EINVAL;
136 	}
137 
138 	return 0;
139 }
140 
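/* Set up a chain of @bd_num descriptors for @cmd, carry @index in the first
 * data word of the first descriptor and send them as one command.
 */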
141 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
142 			      struct hclge_desc *desc_src,
143 			      int index, int bd_num,
144 			      enum hclge_opcode_type cmd)
145 {
146 	struct hclge_desc *desc = desc_src;
147 	int ret, i;
148 
149 	hclge_cmd_setup_basic_desc(desc, cmd, true);
150 	desc->data[0] = cpu_to_le32(index);
151 
152 	for (i = 1; i < bd_num; i++) {
153 		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
154 		desc++;
155 		hclge_cmd_setup_basic_desc(desc, cmd, true);
156 	}
157 
158 	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
159 	if (ret)
160 		dev_err(&hdev->pdev->dev,
161 			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
162 	return ret;
163 }
164 
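/* Dump the per-TQP DFX registers: first a legend mapping "itemN" to each
 * register name, then a header row and one row of register values per TQP
 * of vport 0.
 */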
165 static int
166 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
167 		       const struct hclge_dbg_reg_type_info *reg_info,
168 		       char *buf, int len, int *pos)
169 {
170 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
171 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
172 	struct hclge_desc *desc_src;
173 	u32 index, entry, i, cnt, bd_num;
174 	int min_num, ret;
175 	struct hclge_desc *desc;
176 
177 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
178 	if (ret)
179 		return ret;
180 
181 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
182 	if (!desc_src)
183 		return -ENOMEM;
184 
185 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
186 
187 	for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
188 		*pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
189 				  cnt++, dfx_message->message);
190 
191 	for (i = 0; i < cnt; i++)
192 		*pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
193 
194 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
195 
196 	for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
197 		dfx_message = reg_info->dfx_msg;
198 		desc = desc_src;
199 		ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
200 					 reg_msg->cmd);
201 		if (ret)
202 			break;
203 
204 		for (i = 0; i < min_num; i++, dfx_message++) {
205 			entry = i % HCLGE_DESC_DATA_LEN;
206 			if (i > 0 && !entry)
207 				desc++;
208 
209 			*pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
210 					  le32_to_cpu(desc->data[entry]));
211 		}
212 		*pos += scnprintf(buf + *pos, len - *pos, "\n");
213 	}
214 
215 	kfree(desc_src);
216 	return ret;
217 }
218 
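/* Dump a common DFX register block as "<name>: <value>" lines; entries
 * whose flag is not set are skipped.
 */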
219 static int
220 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
221 			  const struct hclge_dbg_reg_type_info *reg_info,
222 			  char *buf, int len, int *pos)
223 {
224 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
225 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
226 	struct hclge_desc *desc_src;
227 	int min_num, ret;
228 	struct hclge_desc *desc;
229 	u32 entry, i, bd_num;
230 
231 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
232 	if (ret)
233 		return ret;
234 
235 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
236 	if (!desc_src)
237 		return -ENOMEM;
238 
239 	desc = desc_src;
240 
241 	ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
242 	if (ret) {
243 		kfree(desc_src);
244 		return ret;
245 	}
246 
247 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
248 
249 	for (i = 0; i < min_num; i++, dfx_message++) {
250 		entry = i % HCLGE_DESC_DATA_LEN;
251 		if (i > 0 && !entry)
252 			desc++;
253 		if (!dfx_message->flag)
254 			continue;
255 
256 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
257 				  dfx_message->message,
258 				  le32_to_cpu(desc->data[entry]));
259 	}
260 
261 	kfree(desc_src);
262 	return 0;
263 }
264 
265 static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
266 	{HCLGE_MAC_TX_EN_B,  "mac_trans_en"},
267 	{HCLGE_MAC_RX_EN_B,  "mac_rcv_en"},
268 	{HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
269 	{HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
270 	{HCLGE_MAC_1588_TX_B, "1588_trans_en"},
271 	{HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
272 	{HCLGE_MAC_APP_LP_B,  "mac_app_loop_en"},
273 	{HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
274 	{HCLGE_MAC_FCS_TX_B,  "mac_fcs_tx_en"},
275 	{HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
276 	{HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
277 	{HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
278 	{HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
279 	{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
280 };
281 
282 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
283 					     int len, int *pos)
284 {
285 	struct hclge_config_mac_mode_cmd *req;
286 	struct hclge_desc desc;
287 	u32 loop_en, i, offset;
288 	int ret;
289 
290 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
291 
292 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
293 	if (ret) {
294 		dev_err(&hdev->pdev->dev,
295 			"failed to dump mac enable status, ret = %d\n", ret);
296 		return ret;
297 	}
298 
299 	req = (struct hclge_config_mac_mode_cmd *)desc.data;
300 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
301 
302 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
303 		offset = hclge_dbg_mac_en_status[i].offset;
304 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
305 				  hclge_dbg_mac_en_status[i].message,
306 				  hnae3_get_bit(loop_en, offset));
307 	}
308 
309 	return 0;
310 }
311 
312 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
313 					 int len, int *pos)
314 {
315 	struct hclge_config_max_frm_size_cmd *req;
316 	struct hclge_desc desc;
317 	int ret;
318 
319 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
320 
321 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
322 	if (ret) {
323 		dev_err(&hdev->pdev->dev,
324 			"failed to dump mac frame size, ret = %d\n", ret);
325 		return ret;
326 	}
327 
328 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
329 
330 	*pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
331 			  le16_to_cpu(req->max_frm_size));
332 	*pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
333 			  req->min_frm_size);
334 
335 	return 0;
336 }
337 
338 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
339 					   int len, int *pos)
340 {
341 #define HCLGE_MAC_SPEED_SHIFT	0
342 #define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
343 #define HCLGE_MAC_DUPLEX_SHIFT	7
344 
345 	struct hclge_config_mac_speed_dup_cmd *req;
346 	struct hclge_desc desc;
347 	int ret;
348 
349 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
350 
351 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
352 	if (ret) {
353 		dev_err(&hdev->pdev->dev,
354 			"failed to dump mac speed duplex, ret = %d\n", ret);
355 		return ret;
356 	}
357 
358 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
359 
360 	*pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
361 			  hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
362 					  HCLGE_MAC_SPEED_SHIFT));
363 	*pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
364 			  hnae3_get_bit(req->speed_dup,
365 					HCLGE_MAC_DUPLEX_SHIFT));
366 	return 0;
367 }
368 
369 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
370 {
371 	int pos = 0;
372 	int ret;
373 
374 	ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
375 	if (ret)
376 		return ret;
377 
378 	ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
379 	if (ret)
380 		return ret;
381 
382 	return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
383 }
384 
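/* Dump the per-qset DCB DFX status. The low byte of desc.data[1] is a
 * bitmap carrying the RoCE/NIC qset masks, the shaping-pass flag and the
 * back-pressure status.
 */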
385 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
386 				   int *pos)
387 {
388 	struct hclge_dbg_bitmap_cmd req;
389 	struct hclge_desc desc;
390 	u16 qset_id, qset_num;
391 	int ret;
392 
393 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
394 	if (ret)
395 		return ret;
396 
397 	*pos += scnprintf(buf + *pos, len - *pos,
398 			  "qset_id  roce_qset_mask  nic_qset_mask  qset_shaping_pass  qset_bp_status\n");
399 	for (qset_id = 0; qset_id < qset_num; qset_id++) {
400 		ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
401 					 HCLGE_OPC_QSET_DFX_STS);
402 		if (ret)
403 			return ret;
404 
405 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
406 
407 		*pos += scnprintf(buf + *pos, len - *pos,
408 				  "%04u           %#x            %#x             %#x               %#x\n",
409 				  qset_id, req.bit0, req.bit1, req.bit2,
410 				  req.bit3);
411 	}
412 
413 	return 0;
414 }
415 
416 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
417 				  int *pos)
418 {
419 	struct hclge_dbg_bitmap_cmd req;
420 	struct hclge_desc desc;
421 	u8 pri_id, pri_num;
422 	int ret;
423 
424 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
425 	if (ret)
426 		return ret;
427 
428 	*pos += scnprintf(buf + *pos, len - *pos,
429 			  "pri_id  pri_mask  pri_cshaping_pass  pri_pshaping_pass\n");
430 	for (pri_id = 0; pri_id < pri_num; pri_id++) {
431 		ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
432 					 HCLGE_OPC_PRI_DFX_STS);
433 		if (ret)
434 			return ret;
435 
436 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
437 
438 		*pos += scnprintf(buf + *pos, len - *pos,
439 				  "%03u       %#x           %#x                %#x\n",
440 				  pri_id, req.bit0, req.bit1, req.bit2);
441 	}
442 
443 	return 0;
444 }
445 
446 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
447 				 int *pos)
448 {
449 	struct hclge_dbg_bitmap_cmd req;
450 	struct hclge_desc desc;
451 	u8 pg_id;
452 	int ret;
453 
454 	*pos += scnprintf(buf + *pos, len - *pos,
455 			  "pg_id  pg_mask  pg_cshaping_pass  pg_pshaping_pass\n");
456 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
457 		ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
458 					 HCLGE_OPC_PG_DFX_STS);
459 		if (ret)
460 			return ret;
461 
462 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
463 
464 		*pos += scnprintf(buf + *pos, len - *pos,
465 				  "%03u      %#x           %#x               %#x\n",
466 				  pg_id, req.bit0, req.bit1, req.bit2);
467 	}
468 
469 	return 0;
470 }
471 
472 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
473 				    int *pos)
474 {
475 	struct hclge_desc desc;
476 	u16 nq_id;
477 	int ret;
478 
479 	*pos += scnprintf(buf + *pos, len - *pos,
480 			  "nq_id  sch_nic_queue_cnt  sch_roce_queue_cnt\n");
481 	for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
482 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
483 					 HCLGE_OPC_SCH_NQ_CNT);
484 		if (ret)
485 			return ret;
486 
487 		*pos += scnprintf(buf + *pos, len - *pos, "%04u           %#x",
488 				  nq_id, le32_to_cpu(desc.data[1]));
489 
490 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
491 					 HCLGE_OPC_SCH_RQ_CNT);
492 		if (ret)
493 			return ret;
494 
495 		*pos += scnprintf(buf + *pos, len - *pos,
496 				  "               %#x\n",
497 				  le32_to_cpu(desc.data[1]));
498 	}
499 
500 	return 0;
501 }
502 
503 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
504 				   int *pos)
505 {
506 	struct hclge_dbg_bitmap_cmd req;
507 	struct hclge_desc desc;
508 	u8 port_id = 0;
509 	int ret;
510 
511 	ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
512 				 HCLGE_OPC_PORT_DFX_STS);
513 	if (ret)
514 		return ret;
515 
516 	req.bitmap = (u8)le32_to_cpu(desc.data[1]);
517 
518 	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
519 			 req.bit0);
520 	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
521 			 req.bit1);
522 
523 	return 0;
524 }
525 
526 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
527 				 int *pos)
528 {
529 	struct hclge_desc desc[2];
530 	u8 port_id = 0;
531 	int ret;
532 
533 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
534 				 HCLGE_OPC_TM_INTERNAL_CNT);
535 	if (ret)
536 		return ret;
537 
538 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
539 			  le32_to_cpu(desc[0].data[1]));
540 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
541 			  le32_to_cpu(desc[0].data[2]));
542 
543 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
544 				 HCLGE_OPC_TM_INTERNAL_STS);
545 	if (ret)
546 		return ret;
547 
548 	*pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
549 			  le32_to_cpu(desc[0].data[1]));
550 	*pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
551 			  le32_to_cpu(desc[0].data[2]));
552 	*pos += scnprintf(buf + *pos, len - *pos,
553 			  "sch_roce_fifo_afull_gap: %#x\n",
554 			  le32_to_cpu(desc[0].data[3]));
555 	*pos += scnprintf(buf + *pos, len - *pos,
556 			  "tx_private_waterline: %#x\n",
557 			  le32_to_cpu(desc[0].data[4]));
558 	*pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
559 			  le32_to_cpu(desc[0].data[5]));
560 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
561 			  le32_to_cpu(desc[1].data[0]));
562 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
563 			  le32_to_cpu(desc[1].data[1]));
564 
565 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
566 		return 0;
567 
568 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
569 				 HCLGE_OPC_TM_INTERNAL_STS_1);
570 	if (ret)
571 		return ret;
572 
573 	*pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
574 			  le32_to_cpu(desc[0].data[1]));
575 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
576 			  le32_to_cpu(desc[0].data[2]));
577 	*pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
578 			  le32_to_cpu(desc[0].data[3]));
579 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
580 			  le32_to_cpu(desc[0].data[4]));
581 	*pos += scnprintf(buf + *pos, len - *pos,
582 			  "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
583 			  le32_to_cpu(desc[0].data[5]));
584 
585 	return 0;
586 }
587 
588 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
589 {
590 	int pos = 0;
591 	int ret;
592 
593 	ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
594 	if (ret)
595 		return ret;
596 
597 	ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
598 	if (ret)
599 		return ret;
600 
601 	ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
602 	if (ret)
603 		return ret;
604 
605 	ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
606 	if (ret)
607 		return ret;
608 
609 	ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
610 	if (ret)
611 		return ret;
612 
613 	return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
614 }
615 
616 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
617 				  enum hnae3_dbg_cmd cmd, char *buf, int len)
618 {
619 	const struct hclge_dbg_reg_type_info *reg_info;
620 	int pos = 0, ret = 0;
621 	int i;
622 
623 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
624 		reg_info = &hclge_dbg_reg_info[i];
625 		if (cmd == reg_info->cmd) {
626 			if (cmd == HNAE3_DBG_CMD_REG_TQP)
627 				return hclge_dbg_dump_reg_tqp(hdev, reg_info,
628 							      buf, len, &pos);
629 
630 			ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
631 							len, &pos);
632 			if (ret)
633 				break;
634 		}
635 	}
636 
637 	return ret;
638 }
639 
640 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
641 {
642 	struct hclge_ets_tc_weight_cmd *ets_weight;
643 	struct hclge_desc desc;
644 	char *sch_mode_str;
645 	int pos = 0;
646 	int ret;
647 	u8 i;
648 
649 	if (!hnae3_dev_dcb_supported(hdev)) {
650 		dev_err(&hdev->pdev->dev,
651 			"Only DCB-supported dev supports tc\n");
652 		return -EOPNOTSUPP;
653 	}
654 
655 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
656 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
657 	if (ret) {
658 		dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
659 			ret);
660 		return ret;
661 	}
662 
663 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
664 
665 	pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
666 			 hdev->tm_info.num_tc);
667 	pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
668 			 ets_weight->weight_offset);
669 
670 	pos += scnprintf(buf + pos, len - pos, "TC    MODE  WEIGHT\n");
671 	for (i = 0; i < HNAE3_MAX_TC; i++) {
672 		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
673 		pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
674 				 i, sch_mode_str,
675 				 hdev->tm_info.pg_info[0].tc_dwrr[i]);
676 	}
677 
678 	return 0;
679 }
680 
681 static const struct hclge_dbg_item tm_pg_items[] = {
682 	{ "ID", 2 },
683 	{ "PRI_MAP", 2 },
684 	{ "MODE", 2 },
685 	{ "DWRR", 2 },
686 	{ "C_IR_B", 2 },
687 	{ "C_IR_U", 2 },
688 	{ "C_IR_S", 2 },
689 	{ "C_BS_B", 2 },
690 	{ "C_BS_S", 2 },
691 	{ "C_FLAG", 2 },
692 	{ "C_RATE(Mbps)", 2 },
693 	{ "P_IR_B", 2 },
694 	{ "P_IR_U", 2 },
695 	{ "P_IR_S", 2 },
696 	{ "P_BS_B", 2 },
697 	{ "P_BS_S", 2 },
698 	{ "P_FLAG", 2 },
699 	{ "P_RATE(Mbps)", 0 }
700 };
701 
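/* Fill seven consecutive result columns with one shaper parameter set
 * (ir_b, ir_u, ir_s, bs_b, bs_s, flag, rate), advancing *index as it goes.
 */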
702 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
703 					  char **result, u8 *index)
704 {
705 	sprintf(result[(*index)++], "%3u", para->ir_b);
706 	sprintf(result[(*index)++], "%3u", para->ir_u);
707 	sprintf(result[(*index)++], "%3u", para->ir_s);
708 	sprintf(result[(*index)++], "%3u", para->bs_b);
709 	sprintf(result[(*index)++], "%3u", para->bs_s);
710 	sprintf(result[(*index)++], "%3u", para->flag);
711 	sprintf(result[(*index)++], "%6u", para->rate);
712 }
713 
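/* Dump the TM PG table: for each priority group print its priority bitmap,
 * schedule mode, DWRR weight and the CIR/PIR shaper parameters.
 */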
714 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
715 				  char *buf, int len)
716 {
717 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
718 	char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
719 	u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
720 	char content[HCLGE_DBG_TM_INFO_LEN];
721 	int pos = 0;
722 	int ret;
723 
724 	for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
725 		result[i] = data_str;
726 		data_str += HCLGE_DBG_DATA_STR_LEN;
727 	}
728 
729 	hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
730 			       NULL, ARRAY_SIZE(tm_pg_items));
731 	pos += scnprintf(buf + pos, len - pos, "%s", content);
732 
733 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
734 		ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
735 		if (ret)
736 			return ret;
737 
738 		ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
739 		if (ret)
740 			return ret;
741 
742 		ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
743 		if (ret)
744 			return ret;
745 
746 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
747 					     HCLGE_OPC_TM_PG_C_SHAPPING,
748 					     &c_shaper_para);
749 		if (ret)
750 			return ret;
751 
752 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
753 					     HCLGE_OPC_TM_PG_P_SHAPPING,
754 					     &p_shaper_para);
755 		if (ret)
756 			return ret;
757 
758 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
759 				       "sp";
760 
761 		j = 0;
762 		sprintf(result[j++], "%02u", pg_id);
763 		sprintf(result[j++], "0x%02x", pri_bit_map);
764 		sprintf(result[j++], "%4s", sch_mode_str);
765 		sprintf(result[j++], "%3u", weight);
766 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
767 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
768 
769 		hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
770 				       (const char **)result,
771 				       ARRAY_SIZE(tm_pg_items));
772 		pos += scnprintf(buf + pos, len - pos, "%s", content);
773 	}
774 
775 	return 0;
776 }
777 
778 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
779 {
780 	char *data_str;
781 	int ret;
782 
783 	data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
784 			   HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
785 	if (!data_str)
786 		return -ENOMEM;
787 
788 	ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
789 
790 	kfree(data_str);
791 
792 	return ret;
793 }
794 
795 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
796 {
797 	struct hclge_tm_shaper_para shaper_para;
798 	int pos = 0;
799 	int ret;
800 
801 	ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
802 	if (ret)
803 		return ret;
804 
805 	pos += scnprintf(buf + pos, len - pos,
806 			 "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
807 	pos += scnprintf(buf + pos, len - pos,
808 			 "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
809 			 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
810 			 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
811 			 shaper_para.rate);
812 
813 	return 0;
814 }
815 
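/* Dump the back-pressure to qset bitmap of @tc_id. Unlike most dump helpers
 * in this file, it returns the number of characters written so the caller
 * can append further output.
 */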
816 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
817 					 char *buf, int len)
818 {
819 	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
820 	struct hclge_bp_to_qs_map_cmd *map;
821 	struct hclge_desc desc;
822 	int pos = 0;
823 	u8 group_id;
824 	u8 grp_num;
825 	u16 i = 0;
826 	int ret;
827 
828 	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
829 		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
830 	map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
831 	for (group_id = 0; group_id < grp_num; group_id++) {
832 		hclge_cmd_setup_basic_desc(&desc,
833 					   HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
834 					   true);
835 		map->tc_id = tc_id;
836 		map->qs_group_id = group_id;
837 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
838 		if (ret) {
839 			dev_err(&hdev->pdev->dev,
840 				"failed to get bp to qset map, ret = %d\n",
841 				ret);
842 			return ret;
843 		}
844 
845 		qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
846 	}
847 
848 	pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
849 	for (group_id = 0; group_id < grp_num / 8; group_id++) {
850 		pos += scnprintf(buf + pos, len - pos,
851 			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
852 			 group_id * 256, qset_mapping[i + 7],
853 			 qset_mapping[i + 6], qset_mapping[i + 5],
854 			 qset_mapping[i + 4], qset_mapping[i + 3],
855 			 qset_mapping[i + 2], qset_mapping[i + 1],
856 			 qset_mapping[i]);
857 		i += 8;
858 	}
859 
860 	return pos;
861 }
862 
863 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
864 {
865 	u16 queue_id;
866 	u16 qset_id;
867 	u8 link_vld;
868 	int pos = 0;
869 	u8 pri_id;
870 	u8 tc_id;
871 	int ret;
872 
873 	for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
874 		ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
875 		if (ret)
876 			return ret;
877 
878 		ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
879 						&link_vld);
880 		if (ret)
881 			return ret;
882 
883 		ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
884 		if (ret)
885 			return ret;
886 
887 		pos += scnprintf(buf + pos, len - pos,
888 				 "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
889 		pos += scnprintf(buf + pos, len - pos,
890 				 "%04u        %4u       %3u      %2u\n",
891 				 queue_id, qset_id, pri_id, tc_id);
892 
893 		if (!hnae3_dev_dcb_supported(hdev))
894 			continue;
895 
896 		ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
897 						    len - pos);
898 		if (ret < 0)
899 			return ret;
900 		pos += ret;
901 
902 		pos += scnprintf(buf + pos, len - pos, "\n");
903 	}
904 
905 	return 0;
906 }
907 
908 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
909 {
910 	struct hclge_tm_nodes_cmd *nodes;
911 	struct hclge_desc desc;
912 	int pos = 0;
913 	int ret;
914 
915 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
916 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
917 	if (ret) {
918 		dev_err(&hdev->pdev->dev,
919 			"failed to dump tm nodes, ret = %d\n", ret);
920 		return ret;
921 	}
922 
923 	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
924 
925 	pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
926 	pos += scnprintf(buf + pos, len - pos, "PG      %4u      %4u\n",
927 			 nodes->pg_base_id, nodes->pg_num);
928 	pos += scnprintf(buf + pos, len - pos, "PRI     %4u      %4u\n",
929 			 nodes->pri_base_id, nodes->pri_num);
930 	pos += scnprintf(buf + pos, len - pos, "QSET    %4u      %4u\n",
931 			 le16_to_cpu(nodes->qset_base_id),
932 			 le16_to_cpu(nodes->qset_num));
933 	pos += scnprintf(buf + pos, len - pos, "QUEUE   %4u      %4u\n",
934 			 le16_to_cpu(nodes->queue_base_id),
935 			 le16_to_cpu(nodes->queue_num));
936 
937 	return 0;
938 }
939 
940 static const struct hclge_dbg_item tm_pri_items[] = {
941 	{ "ID", 4 },
942 	{ "MODE", 2 },
943 	{ "DWRR", 2 },
944 	{ "C_IR_B", 2 },
945 	{ "C_IR_U", 2 },
946 	{ "C_IR_S", 2 },
947 	{ "C_BS_B", 2 },
948 	{ "C_BS_S", 2 },
949 	{ "C_FLAG", 2 },
950 	{ "C_RATE(Mbps)", 2 },
951 	{ "P_IR_B", 2 },
952 	{ "P_IR_U", 2 },
953 	{ "P_IR_S", 2 },
954 	{ "P_BS_B", 2 },
955 	{ "P_BS_S", 2 },
956 	{ "P_FLAG", 2 },
957 	{ "P_RATE(Mbps)", 0 }
958 };
959 
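/* Dump the TM priority table: for each priority print its schedule mode,
 * DWRR weight and the CIR/PIR shaper parameters.
 */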
960 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
961 {
962 	char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
963 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
964 	char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
965 	char content[HCLGE_DBG_TM_INFO_LEN];
966 	u8 pri_num, sch_mode, weight, i, j;
967 	int pos, ret;
968 
969 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
970 	if (ret)
971 		return ret;
972 
973 	for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
974 		result[i] = &data_str[i][0];
975 
976 	hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
977 			       NULL, ARRAY_SIZE(tm_pri_items));
978 	pos = scnprintf(buf, len, "%s", content);
979 
980 	for (i = 0; i < pri_num; i++) {
981 		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
982 		if (ret)
983 			return ret;
984 
985 		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
986 		if (ret)
987 			return ret;
988 
989 		ret = hclge_tm_get_pri_shaper(hdev, i,
990 					      HCLGE_OPC_TM_PRI_C_SHAPPING,
991 					      &c_shaper_para);
992 		if (ret)
993 			return ret;
994 
995 		ret = hclge_tm_get_pri_shaper(hdev, i,
996 					      HCLGE_OPC_TM_PRI_P_SHAPPING,
997 					      &p_shaper_para);
998 		if (ret)
999 			return ret;
1000 
1001 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1002 			       "sp";
1003 
1004 		j = 0;
1005 		sprintf(result[j++], "%04u", i);
1006 		sprintf(result[j++], "%4s", sch_mode_str);
1007 		sprintf(result[j++], "%3u", weight);
1008 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1009 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1010 		hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1011 				       (const char **)result,
1012 				       ARRAY_SIZE(tm_pri_items));
1013 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1014 	}
1015 
1016 	return 0;
1017 }
1018 
1019 static const struct hclge_dbg_item tm_qset_items[] = {
1020 	{ "ID", 4 },
1021 	{ "MAP_PRI", 2 },
1022 	{ "LINK_VLD", 2 },
1023 	{ "MODE", 2 },
1024 	{ "DWRR", 2 },
1025 	{ "IR_B", 2 },
1026 	{ "IR_U", 2 },
1027 	{ "IR_S", 2 },
1028 	{ "BS_B", 2 },
1029 	{ "BS_S", 2 },
1030 	{ "FLAG", 2 },
1031 	{ "RATE(Mbps)", 0 }
1032 };
1033 
1034 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1035 {
1036 	char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1037 	char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1038 	u8 priority, link_vld, sch_mode, weight;
1039 	struct hclge_tm_shaper_para shaper_para;
1040 	char content[HCLGE_DBG_TM_INFO_LEN];
1041 	u16 qset_num, i;
1042 	int ret, pos;
1043 	u8 j;
1044 
1045 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
1046 	if (ret)
1047 		return ret;
1048 
1049 	for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1050 		result[i] = &data_str[i][0];
1051 
1052 	hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1053 			       NULL, ARRAY_SIZE(tm_qset_items));
1054 	pos = scnprintf(buf, len, "%s", content);
1055 
1056 	for (i = 0; i < qset_num; i++) {
1057 		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1058 		if (ret)
1059 			return ret;
1060 
1061 		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1062 		if (ret)
1063 			return ret;
1064 
1065 		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1066 		if (ret)
1067 			return ret;
1068 
1069 		ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1070 		if (ret)
1071 			return ret;
1072 
1073 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1074 			       "sp";
1075 
1076 		j = 0;
1077 		sprintf(result[j++], "%04u", i);
1078 		sprintf(result[j++], "%4u", priority);
1079 		sprintf(result[j++], "%4u", link_vld);
1080 		sprintf(result[j++], "%4s", sch_mode_str);
1081 		sprintf(result[j++], "%3u", weight);
1082 		hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1083 
1084 		hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1085 				       (const char **)result,
1086 				       ARRAY_SIZE(tm_qset_items));
1087 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1094 					int len)
1095 {
1096 	struct hclge_cfg_pause_param_cmd *pause_param;
1097 	struct hclge_desc desc;
1098 	int pos = 0;
1099 	int ret;
1100 
1101 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1102 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1103 	if (ret) {
1104 		dev_err(&hdev->pdev->dev,
1105 			"failed to dump qos pause, ret = %d\n", ret);
1106 		return ret;
1107 	}
1108 
1109 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1110 
1111 	pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1112 			 pause_param->pause_trans_gap);
1113 	pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1114 			 le16_to_cpu(pause_param->pause_trans_time));
1115 	return 0;
1116 }
1117 
1118 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1119 				      int len)
1120 {
1121 #define HCLGE_DBG_TC_MASK		0x0F
1122 #define HCLGE_DBG_TC_BIT_WIDTH		4
1123 
1124 	struct hclge_qos_pri_map_cmd *pri_map;
1125 	struct hclge_desc desc;
1126 	int pos = 0;
1127 	u8 *pri_tc;
1128 	u8 tc, i;
1129 	int ret;
1130 
1131 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1132 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1133 	if (ret) {
1134 		dev_err(&hdev->pdev->dev,
1135 			"failed to dump qos pri map, ret = %d\n", ret);
1136 		return ret;
1137 	}
1138 
1139 	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1140 
1141 	pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1142 			 pri_map->vlan_pri);
1143 	pos += scnprintf(buf + pos, len - pos, "PRI  TC\n");
1144 
1145 	pri_tc = (u8 *)pri_map;
1146 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1147 		tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1148 		tc &= HCLGE_DBG_TC_MASK;
1149 		pos += scnprintf(buf + pos, len - pos, "%u     %u\n", i, tc);
1150 	}
1151 
1152 	return 0;
1153 }
1154 
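/* The buffer configuration helpers below return the number of characters
 * written (or a negative errno), so hclge_dbg_dump_qos_buf_cfg() can chain
 * their output into one buffer.
 */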
1155 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1156 {
1157 	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1158 	struct hclge_desc desc;
1159 	int pos = 0;
1160 	int i, ret;
1161 
1162 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1163 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1164 	if (ret) {
1165 		dev_err(&hdev->pdev->dev,
1166 			"failed to dump tx buf, ret = %d\n", ret);
1167 		return ret;
1168 	}
1169 
1170 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1171 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1172 		pos += scnprintf(buf + pos, len - pos,
1173 				 "tx_packet_buf_tc_%d: 0x%x\n", i,
1174 				 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1175 
1176 	return pos;
1177 }
1178 
1179 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1180 					  int len)
1181 {
1182 	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1183 	struct hclge_desc desc;
1184 	int pos = 0;
1185 	int i, ret;
1186 
1187 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1188 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1189 	if (ret) {
1190 		dev_err(&hdev->pdev->dev,
1191 			"failed to dump rx priv buf, ret = %d\n", ret);
1192 		return ret;
1193 	}
1194 
1195 	pos += scnprintf(buf + pos, len - pos, "\n");
1196 
1197 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1198 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1199 		pos += scnprintf(buf + pos, len - pos,
1200 				 "rx_packet_buf_tc_%d: 0x%x\n", i,
1201 				 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1202 
1203 	pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1204 			 le16_to_cpu(rx_buf_cmd->shared_buf));
1205 
1206 	return pos;
1207 }
1208 
1209 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1210 					   int len)
1211 {
1212 	struct hclge_rx_com_wl *rx_com_wl;
1213 	struct hclge_desc desc;
1214 	int pos = 0;
1215 	int ret;
1216 
1217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1218 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1219 	if (ret) {
1220 		dev_err(&hdev->pdev->dev,
1221 			"failed to dump rx common wl, ret = %d\n", ret);
1222 		return ret;
1223 	}
1224 
1225 	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1226 	pos += scnprintf(buf + pos, len - pos, "\n");
1227 	pos += scnprintf(buf + pos, len - pos,
1228 			 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1229 			 le16_to_cpu(rx_com_wl->com_wl.high),
1230 			 le16_to_cpu(rx_com_wl->com_wl.low));
1231 
1232 	return pos;
1233 }
1234 
1235 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1236 					    int len)
1237 {
1238 	struct hclge_rx_com_wl *rx_packet_cnt;
1239 	struct hclge_desc desc;
1240 	int pos = 0;
1241 	int ret;
1242 
1243 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1244 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1245 	if (ret) {
1246 		dev_err(&hdev->pdev->dev,
1247 			"failed to dump rx global pkt cnt, ret = %d\n", ret);
1248 		return ret;
1249 	}
1250 
1251 	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1252 	pos += scnprintf(buf + pos, len - pos,
1253 			 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1254 			 le16_to_cpu(rx_packet_cnt->com_wl.high),
1255 			 le16_to_cpu(rx_packet_cnt->com_wl.low));
1256 
1257 	return pos;
1258 }
1259 
1260 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1261 					     int len)
1262 {
1263 	struct hclge_rx_priv_wl_buf *rx_priv_wl;
1264 	struct hclge_desc desc[2];
1265 	int pos = 0;
1266 	int i, ret;
1267 
1268 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1269 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1270 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1271 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1272 	if (ret) {
1273 		dev_err(&hdev->pdev->dev,
1274 			"failed to dump rx priv wl buf, ret = %d\n", ret);
1275 		return ret;
1276 	}
1277 
1278 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1279 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1280 		pos += scnprintf(buf + pos, len - pos,
1281 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1282 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1283 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1284 
1285 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1286 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1287 		pos += scnprintf(buf + pos, len - pos,
1288 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1289 			 i + HCLGE_TC_NUM_ONE_DESC,
1290 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1291 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1292 
1293 	return pos;
1294 }
1295 
1296 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1297 						  char *buf, int len)
1298 {
1299 	struct hclge_rx_com_thrd *rx_com_thrd;
1300 	struct hclge_desc desc[2];
1301 	int pos = 0;
1302 	int i, ret;
1303 
1304 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1305 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1306 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1307 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1308 	if (ret) {
1309 		dev_err(&hdev->pdev->dev,
1310 			"failed to dump rx common threshold, ret = %d\n", ret);
1311 		return ret;
1312 	}
1313 
1314 	pos += scnprintf(buf + pos, len - pos, "\n");
1315 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1316 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1317 		pos += scnprintf(buf + pos, len - pos,
1318 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1319 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1320 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1321 
1322 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1323 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1324 		pos += scnprintf(buf + pos, len - pos,
1325 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1326 			 i + HCLGE_TC_NUM_ONE_DESC,
1327 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1328 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1329 
1330 	return pos;
1331 }
1332 
1333 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1334 				      int len)
1335 {
1336 	int pos = 0;
1337 	int ret;
1338 
1339 	ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1340 	if (ret < 0)
1341 		return ret;
1342 	pos += ret;
1343 
1344 	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1345 	if (ret < 0)
1346 		return ret;
1347 	pos += ret;
1348 
1349 	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1350 	if (ret < 0)
1351 		return ret;
1352 	pos += ret;
1353 
1354 	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1355 	if (ret < 0)
1356 		return ret;
1357 	pos += ret;
1358 
1359 	pos += scnprintf(buf + pos, len - pos, "\n");
1360 	if (!hnae3_dev_dcb_supported(hdev))
1361 		return 0;
1362 
1363 	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1364 	if (ret < 0)
1365 		return ret;
1366 	pos += ret;
1367 
1368 	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1369 						     len - pos);
1370 	if (ret < 0)
1371 		return ret;
1372 
1373 	return 0;
1374 }
1375 
1376 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1377 {
1378 	struct hclge_mac_ethertype_idx_rd_cmd *req0;
1379 	struct hclge_desc desc;
1380 	u32 msg_egress_port;
1381 	int pos = 0;
1382 	int ret, i;
1383 
1384 	pos += scnprintf(buf + pos, len - pos,
1385 			 "entry  mac_addr          mask  ether  ");
1386 	pos += scnprintf(buf + pos, len - pos,
1387 			 "mask  vlan  mask  i_map  i_dir  e_type  ");
1388 	pos += scnprintf(buf + pos, len - pos, "pf_id  vf_id  q_id  drop\n");
1389 
1390 	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1391 		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1392 					   true);
1393 		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1394 		req0->index = cpu_to_le16(i);
1395 
1396 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1397 		if (ret) {
1398 			dev_err(&hdev->pdev->dev,
1399 				"failed to dump manage table, ret = %d\n", ret);
1400 			return ret;
1401 		}
1402 
1403 		if (!req0->resp_code)
1404 			continue;
1405 
1406 		pos += scnprintf(buf + pos, len - pos, "%02u     %pM ",
1407 				 le16_to_cpu(req0->index), req0->mac_addr);
1408 
1409 		pos += scnprintf(buf + pos, len - pos,
1410 				 "%x     %04x   %x     %04x  ",
1411 				 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1412 				 le16_to_cpu(req0->ethter_type),
1413 				 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1414 				 le16_to_cpu(req0->vlan_tag) &
1415 				 HCLGE_DBG_MNG_VLAN_TAG);
1416 
1417 		pos += scnprintf(buf + pos, len - pos,
1418 				 "%x     %02x     %02x     ",
1419 				 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1420 				 req0->i_port_bitmap, req0->i_port_direction);
1421 
1422 		msg_egress_port = le16_to_cpu(req0->egress_port);
1423 		pos += scnprintf(buf + pos, len - pos,
1424 				 "%x       %x      %02x     %04x  %x\n",
1425 				 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1426 				 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1427 				 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1428 				 le16_to_cpu(req0->egress_queue),
1429 				 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1430 	}
1431 
1432 	return 0;
1433 }
1434 
1435 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1436 
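/* Read the x or y half (selected by @sel_x) of the flow director TCAM entry
 * described by @tcam_msg and format its 13 key words into @tcam_buf.
 */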
1437 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1438 				  char *tcam_buf,
1439 				  struct hclge_dbg_tcam_msg tcam_msg)
1440 {
1441 	struct hclge_fd_tcam_config_1_cmd *req1;
1442 	struct hclge_fd_tcam_config_2_cmd *req2;
1443 	struct hclge_fd_tcam_config_3_cmd *req3;
1444 	struct hclge_desc desc[3];
1445 	int pos = 0;
1446 	int ret, i;
1447 	u32 *req;
1448 
1449 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1450 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1451 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1452 	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1453 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1454 
1455 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1456 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1457 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1458 
1459 	req1->stage  = tcam_msg.stage;
1460 	req1->xy_sel = sel_x ? 1 : 0;
1461 	req1->index  = cpu_to_le32(tcam_msg.loc);
1462 
1463 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
1464 	if (ret)
1465 		return ret;
1466 
1467 	pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1468 			 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1469 			 tcam_msg.loc);
1470 
1471 	/* tcam_data0 ~ tcam_data1 */
1472 	req = (u32 *)req1->tcam_data;
1473 	for (i = 0; i < 2; i++)
1474 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1475 				 "%08x\n", *req++);
1476 
1477 	/* tcam_data2 ~ tcam_data7 */
1478 	req = (u32 *)req2->tcam_data;
1479 	for (i = 0; i < 6; i++)
1480 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1481 				 "%08x\n", *req++);
1482 
1483 	/* tcam_data8 ~ tcam_data12 */
1484 	req = (u32 *)req3->tcam_data;
1485 	for (i = 0; i < 5; i++)
1486 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1487 				 "%08x\n", *req++);
1488 
1489 	return ret;
1490 }
1491 
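/* Snapshot the locations of all flow director rules under fd_rule_lock.
 * Returns the rule count, or -EINVAL if the list is empty or its length
 * does not match hclge_fd_rule_num.
 */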
1492 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1493 {
1494 	struct hclge_fd_rule *rule;
1495 	struct hlist_node *node;
1496 	int cnt = 0;
1497 
1498 	spin_lock_bh(&hdev->fd_rule_lock);
1499 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1500 		rule_locs[cnt] = rule->location;
1501 		cnt++;
1502 	}
1503 	spin_unlock_bh(&hdev->fd_rule_lock);
1504 
1505 	if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1506 		return -EINVAL;
1507 
1508 	return cnt;
1509 }
1510 
1511 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1512 {
1513 	u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1514 	struct hclge_dbg_tcam_msg tcam_msg;
1515 	int i, ret, rule_cnt;
1516 	u16 *rule_locs;
1517 	char *tcam_buf;
1518 	int pos = 0;
1519 
1520 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1521 		dev_err(&hdev->pdev->dev,
1522 			"Only FD-supported dev supports dump fd tcam\n");
1523 		return -EOPNOTSUPP;
1524 	}
1525 
1526 	if (!hdev->hclge_fd_rule_num || !rule_num)
1527 		return 0;
1528 
1529 	rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1530 	if (!rule_locs)
1531 		return -ENOMEM;
1532 
1533 	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1534 	if (!tcam_buf) {
1535 		kfree(rule_locs);
1536 		return -ENOMEM;
1537 	}
1538 
1539 	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1540 	if (rule_cnt < 0) {
1541 		ret = rule_cnt;
1542 		dev_err(&hdev->pdev->dev,
1543 			"failed to get rule number, ret = %d\n", ret);
1544 		goto out;
1545 	}
1546 
1547 	ret = 0;
1548 	for (i = 0; i < rule_cnt; i++) {
1549 		tcam_msg.stage = HCLGE_FD_STAGE_1;
1550 		tcam_msg.loc = rule_locs[i];
1551 
1552 		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1553 		if (ret) {
1554 			dev_err(&hdev->pdev->dev,
1555 				"failed to get fd tcam key x, ret = %d\n", ret);
1556 			goto out;
1557 		}
1558 
1559 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1560 
1561 		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1562 		if (ret) {
1563 			dev_err(&hdev->pdev->dev,
1564 				"failed to get fd tcam key y, ret = %d\n", ret);
1565 			goto out;
1566 		}
1567 
1568 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1569 	}
1570 
1571 out:
1572 	kfree(tcam_buf);
1573 	kfree(rule_locs);
1574 	return ret;
1575 }
1576 
1577 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1578 {
1579 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1580 	struct hclge_fd_ad_cnt_read_cmd *req;
1581 	char str_id[HCLGE_DBG_ID_LEN];
1582 	struct hclge_desc desc;
1583 	int pos = 0;
1584 	int ret;
1585 	u64 cnt;
1586 	u8 i;
1587 
1588 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
1589 		return -EOPNOTSUPP;
1590 
1591 	pos += scnprintf(buf + pos, len - pos,
1592 			 "func_id\thit_times\n");
1593 
1594 	for (i = 0; i < func_num; i++) {
1595 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1596 		req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1597 		req->index = cpu_to_le16(i);
1598 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1599 		if (ret) {
1600 			dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1601 				ret);
1602 			return ret;
1603 		}
1604 		cnt = le64_to_cpu(req->cnt);
1605 		hclge_dbg_get_func_id_str(str_id, i);
1606 		pos += scnprintf(buf + pos, len - pos,
1607 				 "%s\t%llu\n", str_id, cnt);
1608 	}
1609 
1610 	return 0;
1611 }
1612 
1613 static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
1614 	{HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
1615 	{HCLGE_MISC_RESET_STS_REG,   "reset interrupt source"},
1616 	{HCLGE_MISC_VECTOR_INT_STS,  "reset interrupt status"},
1617 	{HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
1618 	{HCLGE_GLOBAL_RESET_REG,  "hardware reset status"},
1619 	{HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
1620 	{HCLGE_FUN_RST_ING, "function reset status"}
1621 };
1622 
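/* Dump the reset statistics together with the raw values of the registers
 * listed in hclge_dbg_rst_info. Not static: it can also be called from
 * outside this file.
 */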
1623 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1624 {
1625 	u32 i, offset;
1626 	int pos = 0;
1627 
1628 	pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1629 			 hdev->rst_stats.pf_rst_cnt);
1630 	pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1631 			 hdev->rst_stats.flr_rst_cnt);
1632 	pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1633 			 hdev->rst_stats.global_rst_cnt);
1634 	pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1635 			 hdev->rst_stats.imp_rst_cnt);
1636 	pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1637 			 hdev->rst_stats.reset_done_cnt);
1638 	pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1639 			 hdev->rst_stats.hw_reset_done_cnt);
1640 	pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1641 			 hdev->rst_stats.reset_cnt);
1642 	pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1643 			 hdev->rst_stats.reset_fail_cnt);
1644 
1645 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
1646 		offset = hclge_dbg_rst_info[i].offset;
1647 		pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
1648 				 hclge_dbg_rst_info[i].message,
1649 				 hclge_read_dev(&hdev->hw, offset));
1650 	}
1651 
1652 	pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1653 			 hdev->state);
1654 
1655 	return 0;
1656 }
1657 
1658 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1659 {
1660 	unsigned long rem_nsec;
1661 	int pos = 0;
1662 	u64 lc;
1663 
1664 	lc = local_clock();
1665 	rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1666 
1667 	pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1668 			 (unsigned long)lc, rem_nsec / 1000);
1669 	pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1670 			 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1671 	pos += scnprintf(buf + pos, len - pos,
1672 			 "last_service_task_processed: %lu(jiffies)\n",
1673 			 hdev->last_serv_processed);
1674 	pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1675 			 hdev->serv_processed_cnt);
1676 
1677 	return 0;
1678 }
1679 
1680 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1681 {
1682 	int pos = 0;
1683 
1684 	pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1685 			 hdev->num_nic_msi);
1686 	pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1687 			 hdev->num_roce_msi);
1688 	pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1689 			 hdev->num_msi_used);
1690 	pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1691 			 hdev->num_msi_left);
1692 
1693 	return 0;
1694 }
1695 
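/* Print the IMP statistics as "offset | data" lines, two 32-bit words per
 * line, walking all @bd_num descriptors.
 */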
1696 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1697 					  char *buf, int len, u32 bd_num)
1698 {
1699 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1700 
1701 	struct hclge_desc *desc_index = desc_src;
1702 	u32 offset = 0;
1703 	int pos = 0;
1704 	u32 i, j;
1705 
1706 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1707 
1708 	for (i = 0; i < bd_num; i++) {
1709 		j = 0;
1710 		while (j < HCLGE_DESC_DATA_LEN - 1) {
1711 			pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1712 					 offset);
1713 			pos += scnprintf(buf + pos, len - pos, "0x%08x  ",
1714 					 le32_to_cpu(desc_index->data[j++]));
1715 			pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1716 					 le32_to_cpu(desc_index->data[j++]));
1717 			offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1718 		}
1719 		desc_index++;
1720 	}
1721 }
1722 
1723 static int
1724 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1725 {
1726 	struct hclge_get_imp_bd_cmd *req;
1727 	struct hclge_desc *desc_src;
1728 	struct hclge_desc desc;
1729 	u32 bd_num;
1730 	int ret;
1731 
1732 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1733 
1734 	req = (struct hclge_get_imp_bd_cmd *)desc.data;
1735 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1736 	if (ret) {
1737 		dev_err(&hdev->pdev->dev,
1738 			"failed to get imp statistics bd number, ret = %d\n",
1739 			ret);
1740 		return ret;
1741 	}
1742 
1743 	bd_num = le32_to_cpu(req->bd_num);
1744 	if (!bd_num) {
1745 		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1746 		return -EINVAL;
1747 	}
1748 
1749 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1750 	if (!desc_src)
1751 		return -ENOMEM;
1752 
1753 	ret  = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
1754 				  HCLGE_OPC_IMP_STATS_INFO);
1755 	if (ret) {
1756 		kfree(desc_src);
1757 		dev_err(&hdev->pdev->dev,
1758 			"failed to get imp statistics, ret = %d\n", ret);
1759 		return ret;
1760 	}
1761 
1762 	hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1763 
1764 	kfree(desc_src);
1765 
1766 	return 0;
1767 }
1768 
1769 #define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
1770 #define HCLGE_MAX_NCL_CONFIG_LENGTH	16384
1771 
1772 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1773 					char *buf, int len, int *pos)
1774 {
1775 #define HCLGE_CMD_DATA_NUM		6
1776 
1777 	int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1778 	int i, j;
1779 
1780 	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1781 		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1782 			if (i == 0 && j == 0)
1783 				continue;
1784 
1785 			*pos += scnprintf(buf + *pos, len - *pos,
1786 					  "0x%04x | 0x%08x\n", offset,
1787 					  le32_to_cpu(desc[i].data[j]));
1788 
1789 			offset += sizeof(u32);
1790 			*index -= sizeof(u32);
1791 
1792 			if (*index <= 0)
1793 				return;
1794 		}
1795 	}
1796 }
1797 
1798 static int
1799 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1800 {
1801 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
1802 
1803 	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1804 	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1805 	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1806 	int pos = 0;
1807 	u32 data0;
1808 	int ret;
1809 
1810 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1811 
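	/* data0 encodes the read request: the current byte offset in the
	 * low 16 bits and the chunk length in the bits above.
	 */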
1812 	while (index > 0) {
1813 		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1814 		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1815 			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1816 		else
1817 			data0 |= (u32)index << 16;
1818 		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1819 					 HCLGE_OPC_QUERY_NCL_CONFIG);
1820 		if (ret)
1821 			return ret;
1822 
1823 		hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
1824 	}
1825 
1826 	return 0;
1827 }
1828 
1829 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1830 {
1831 	struct phy_device *phydev = hdev->hw.mac.phydev;
1832 	struct hclge_config_mac_mode_cmd *req_app;
1833 	struct hclge_common_lb_cmd *req_common;
1834 	struct hclge_desc desc;
1835 	u8 loopback_en;
1836 	int pos = 0;
1837 	int ret;
1838 
1839 	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1840 	req_common = (struct hclge_common_lb_cmd *)desc.data;
1841 
1842 	pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1843 			 hdev->hw.mac.mac_id);
1844 
1845 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1846 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1847 	if (ret) {
1848 		dev_err(&hdev->pdev->dev,
1849 			"failed to dump app loopback status, ret = %d\n", ret);
1850 		return ret;
1851 	}
1852 
1853 	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1854 				    HCLGE_MAC_APP_LP_B);
1855 	pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1856 			 state_str[loopback_en]);
1857 
1858 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1859 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1860 	if (ret) {
1861 		dev_err(&hdev->pdev->dev,
1862 			"failed to dump common loopback status, ret = %d\n",
1863 			ret);
1864 		return ret;
1865 	}
1866 
	loopback_en = req_common->enable &
			HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B ? 1 : 0;
1868 	pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1869 			 state_str[loopback_en]);
1870 
1871 	loopback_en = req_common->enable &
1872 			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1873 	pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1874 			 state_str[loopback_en]);
1875 
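	/* An attached PHY driver reports its own loopback state; for PHYs
	 * managed by the IMP firmware, read the state from the common
	 * loopback command instead.
	 */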
1876 	if (phydev) {
1877 		loopback_en = phydev->loopback_enabled;
1878 		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1879 				 state_str[loopback_en]);
1880 	} else if (hnae3_dev_phy_imp_supported(hdev)) {
		loopback_en = req_common->enable &
			      HCLGE_CMD_GE_PHY_INNER_LOOP_B ? 1 : 0;
1883 		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1884 				 state_str[loopback_en]);
1885 	}
1886 
1887 	return 0;
1888 }
1889 
/**
 * hclge_dbg_dump_mac_tnl_status - dump the recently logged mac tnl interrupts
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to fill with the dump output
 * @len: size of @buf
 */
1893 static int
1894 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1895 {
1896 	struct hclge_mac_tnl_stats stats;
1897 	unsigned long rem_nsec;
1898 	int pos = 0;
1899 
	pos += scnprintf(buf + pos, len - pos,
			 "Recently triggered mac tnl interrupts:\n");
1902 
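	/* stats.time holds a nanosecond timestamp; do_div() splits it into
	 * whole seconds (left in stats.time) and the nanosecond remainder.
	 */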
1903 	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1904 		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1905 
1906 		pos += scnprintf(buf + pos, len - pos,
				 "[%07lu.%06lu] status = 0x%x\n",
1908 				 (unsigned long)stats.time, rem_nsec / 1000,
1909 				 stats.status);
1910 	}
1911 
1912 	return 0;
1913 }
1914 
1916 static const struct hclge_dbg_item mac_list_items[] = {
1917 	{ "FUNC_ID", 2 },
1918 	{ "MAC_ADDR", 12 },
1919 	{ "STATE", 2 },
1920 };
1921 
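/* Dump the unicast or multicast MAC address list of every vport. Each
 * column is formatted into data_str[] and hclge_dbg_fill_content() pads
 * the row according to the widths declared in mac_list_items[].
 */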
1922 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
1923 				    bool is_unicast)
1924 {
1925 	char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
1926 	char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
1927 	char *result[ARRAY_SIZE(mac_list_items)];
1928 	struct hclge_mac_node *mac_node, *tmp;
1929 	struct hclge_vport *vport;
1930 	struct list_head *list;
1931 	u32 func_id;
1932 	int pos = 0;
1933 	int i;
1934 
1935 	for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
1936 		result[i] = &data_str[i][0];
1937 
1938 	pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
1939 			 is_unicast ? "UC" : "MC");
1940 	hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
1941 			       NULL, ARRAY_SIZE(mac_list_items));
1942 	pos += scnprintf(buf + pos, len - pos, "%s", content);
1943 
1944 	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
1945 		vport = &hdev->vport[func_id];
1946 		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
1947 		spin_lock_bh(&vport->mac_list_lock);
1948 		list_for_each_entry_safe(mac_node, tmp, list, node) {
1949 			i = 0;
1950 			result[i++] = hclge_dbg_get_func_id_str(str_id,
1951 								func_id);
1952 			sprintf(result[i++], "%pM", mac_node->mac_addr);
1953 			sprintf(result[i++], "%5s",
1954 				hclge_mac_state_str[mac_node->state]);
1955 			hclge_dbg_fill_content(content, sizeof(content),
1956 					       mac_list_items,
1957 					       (const char **)result,
1958 					       ARRAY_SIZE(mac_list_items));
1959 			pos += scnprintf(buf + pos, len - pos, "%s", content);
1960 		}
1961 		spin_unlock_bh(&vport->mac_list_lock);
1962 	}
1963 }
1964 
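/* Dump the unicast MAC VLAN (UMV) space usage: the per-function private
 * quota and the space shared by all functions.
 */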
1965 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
1966 {
1967 	u8 func_num = pci_num_vf(hdev->pdev) + 1;
1968 	struct hclge_vport *vport;
1969 	int pos = 0;
1970 	u8 i;
1971 
	pos += scnprintf(buf + pos, len - pos, "num_alloc_vport  : %u\n",
			 hdev->num_alloc_vport);
1974 	pos += scnprintf(buf + pos, len - pos, "max_umv_size     : %u\n",
1975 			 hdev->max_umv_size);
1976 	pos += scnprintf(buf + pos, len - pos, "wanted_umv_size  : %u\n",
1977 			 hdev->wanted_umv_size);
1978 	pos += scnprintf(buf + pos, len - pos, "priv_umv_size    : %u\n",
1979 			 hdev->priv_umv_size);
1980 
1981 	mutex_lock(&hdev->vport_lock);
1982 	pos += scnprintf(buf + pos, len - pos, "share_umv_size   : %u\n",
1983 			 hdev->share_umv_size);
1984 	for (i = 0; i < func_num; i++) {
1985 		vport = &hdev->vport[i];
1986 		pos += scnprintf(buf + pos, len - pos,
1987 				 "vport(%u) used_umv_num : %u\n",
1988 				 i, vport->used_umv_num);
1989 	}
1990 	mutex_unlock(&hdev->vport_lock);
1991 
1992 	pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num  : %u\n",
1993 			 hdev->used_mc_mac_num);
1994 
1995 	return 0;
1996 }
1997 
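/* Query the Rx VLAN offload configuration of one function; the target
 * function is selected by setting its bit in the command's vf_bitmap.
 */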
1998 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
1999 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2000 {
2001 	struct hclge_vport_vtag_rx_cfg_cmd *req;
2002 	struct hclge_desc desc;
2003 	u16 bmap_index;
2004 	u8 rx_cfg;
2005 	int ret;
2006 
2007 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2008 
2009 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
2010 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2011 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2012 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2013 
2014 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2015 	if (ret) {
2016 		dev_err(&hdev->pdev->dev,
2017 			"failed to get vport%u rxvlan cfg, ret = %d\n",
2018 			vf_id, ret);
2019 		return ret;
2020 	}
2021 
2022 	rx_cfg = req->vport_vlan_cfg;
2023 	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2024 	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2025 	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2026 	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2027 	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2028 	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2029 
2030 	return 0;
2031 }
2032 
2033 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2034 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2035 {
2036 	struct hclge_vport_vtag_tx_cfg_cmd *req;
2037 	struct hclge_desc desc;
2038 	u16 bmap_index;
2039 	u8 tx_cfg;
2040 	int ret;
2041 
2042 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2043 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2044 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2045 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2046 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2047 
2048 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2049 	if (ret) {
2050 		dev_err(&hdev->pdev->dev,
2051 			"failed to get vport%u txvlan cfg, ret = %d\n",
2052 			vf_id, ret);
2053 		return ret;
2054 	}
2055 
2056 	tx_cfg = req->vport_vlan_cfg;
2057 	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2058 
2059 	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2060 	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2061 	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2062 	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2063 	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2064 	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2065 	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2066 
2067 	return 0;
2068 }
2069 
2070 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2071 					    u8 vlan_type, u8 vf_id,
2072 					    struct hclge_desc *desc)
2073 {
2074 	struct hclge_vlan_filter_ctrl_cmd *req;
2075 	int ret;
2076 
2077 	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2078 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2079 	req->vlan_type = vlan_type;
2080 	req->vf_id = vf_id;
2081 
2082 	ret = hclge_cmd_send(&hdev->hw, desc, 1);
2083 	if (ret)
2084 		dev_err(&hdev->pdev->dev,
2085 			"failed to get vport%u vlan filter config, ret = %d.\n",
2086 			vf_id, ret);
2087 
2088 	return ret;
2089 }
2090 
2091 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2092 				       u8 vf_id, u8 *vlan_fe)
2093 {
2094 	struct hclge_vlan_filter_ctrl_cmd *req;
2095 	struct hclge_desc desc;
2096 	int ret;
2097 
2098 	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2099 	if (ret)
2100 		return ret;
2101 
2102 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2103 	*vlan_fe = req->vlan_fe;
2104 
2105 	return 0;
2106 }
2107 
2108 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2109 						   u8 vf_id, u8 *bypass_en)
2110 {
2111 	struct hclge_port_vlan_filter_bypass_cmd *req;
2112 	struct hclge_desc desc;
2113 	int ret;
2114 
2115 	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2116 		return 0;
2117 
2118 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2119 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2120 	req->vf_id = vf_id;
2121 
2122 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2123 	if (ret) {
2124 		dev_err(&hdev->pdev->dev,
2125 			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2126 			vf_id, ret);
2127 		return ret;
2128 	}
2129 
2130 	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2131 
2132 	return 0;
2133 }
2134 
2135 static const struct hclge_dbg_item vlan_filter_items[] = {
2136 	{ "FUNC_ID", 2 },
2137 	{ "I_VF_VLAN_FILTER", 2 },
2138 	{ "E_VF_VLAN_FILTER", 2 },
2139 	{ "PORT_VLAN_FILTER_BYPASS", 0 }
2140 };
2141 
2142 static const struct hclge_dbg_item vlan_offload_items[] = {
2143 	{ "FUNC_ID", 2 },
2144 	{ "PVID", 4 },
2145 	{ "ACCEPT_TAG1", 2 },
2146 	{ "ACCEPT_TAG2", 2 },
2147 	{ "ACCEPT_UNTAG1", 2 },
2148 	{ "ACCEPT_UNTAG2", 2 },
2149 	{ "INSERT_TAG1", 2 },
2150 	{ "INSERT_TAG2", 2 },
2151 	{ "SHIFT_TAG", 2 },
2152 	{ "STRIP_TAG1", 2 },
2153 	{ "STRIP_TAG2", 2 },
2154 	{ "DROP_TAG1", 2 },
2155 	{ "DROP_TAG2", 2 },
2156 	{ "PRI_ONLY_TAG1", 2 },
2157 	{ "PRI_ONLY_TAG2", 0 }
2158 };
2159 
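/* Dump the VLAN filter state: the port level ingress/egress filters
 * first, then the per-function VF filters and, when the device supports
 * it, the port VLAN filter bypass state.
 */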
2160 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2161 					     int len, int *pos)
2162 {
2163 	char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2164 	const char *result[ARRAY_SIZE(vlan_filter_items)];
2165 	u8 i, j, vlan_fe, bypass, ingress, egress;
2166 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2167 	int ret;
2168 
2169 	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2170 					  &vlan_fe);
2171 	if (ret)
2172 		return ret;
2173 	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2174 	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2175 
	*pos += scnprintf(buf + *pos, len - *pos, "I_PORT_VLAN_FILTER: %s\n",
			  state_str[ingress]);
2178 	*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2179 			  state_str[egress]);
2180 
2181 	hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2182 			       NULL, ARRAY_SIZE(vlan_filter_items));
2183 	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2184 
2185 	for (i = 0; i < func_num; i++) {
2186 		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2187 						  &vlan_fe);
2188 		if (ret)
2189 			return ret;
2190 
2191 		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2192 		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2193 		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2194 		if (ret)
2195 			return ret;
2196 		j = 0;
2197 		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2198 		result[j++] = state_str[ingress];
2199 		result[j++] = state_str[egress];
2200 		result[j++] =
2201 			test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2202 				 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2203 		hclge_dbg_fill_content(content, sizeof(content),
2204 				       vlan_filter_items, result,
2205 				       ARRAY_SIZE(vlan_filter_items));
2206 		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2207 	}
2208 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
2209 
2210 	return 0;
2211 }
2212 
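/* Dump the Tx and Rx VLAN offload settings of every function, one table
 * row per function, using the column widths declared in
 * vlan_offload_items[].
 */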
2213 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2214 					      int len, int *pos)
2215 {
2216 	char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2217 	const char *result[ARRAY_SIZE(vlan_offload_items)];
2218 	char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2219 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2220 	struct hclge_dbg_vlan_cfg vlan_cfg;
2221 	int ret;
2222 	u8 i, j;
2223 
2224 	hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2225 			       NULL, ARRAY_SIZE(vlan_offload_items));
2226 	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2227 
2228 	for (i = 0; i < func_num; i++) {
2229 		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2230 		if (ret)
2231 			return ret;
2232 
2233 		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2234 		if (ret)
2235 			return ret;
2236 
2237 		sprintf(str_pvid, "%u", vlan_cfg.pvid);
2238 		j = 0;
2239 		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2240 		result[j++] = str_pvid;
2241 		result[j++] = state_str[vlan_cfg.accept_tag1];
2242 		result[j++] = state_str[vlan_cfg.accept_tag2];
2243 		result[j++] = state_str[vlan_cfg.accept_untag1];
2244 		result[j++] = state_str[vlan_cfg.accept_untag2];
2245 		result[j++] = state_str[vlan_cfg.insert_tag1];
2246 		result[j++] = state_str[vlan_cfg.insert_tag2];
2247 		result[j++] = state_str[vlan_cfg.shift_tag];
2248 		result[j++] = state_str[vlan_cfg.strip_tag1];
2249 		result[j++] = state_str[vlan_cfg.strip_tag2];
2250 		result[j++] = state_str[vlan_cfg.drop_tag1];
2251 		result[j++] = state_str[vlan_cfg.drop_tag2];
2252 		result[j++] = state_str[vlan_cfg.pri_only1];
2253 		result[j++] = state_str[vlan_cfg.pri_only2];
2254 
2255 		hclge_dbg_fill_content(content, sizeof(content),
2256 				       vlan_offload_items, result,
2257 				       ARRAY_SIZE(vlan_offload_items));
2258 		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2259 	}
2260 
2261 	return 0;
2262 }
2263 
2264 static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
2265 				      int len)
2266 {
2267 	int pos = 0;
2268 	int ret;
2269 
2270 	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
2271 	if (ret)
2272 		return ret;
2273 
2274 	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
2275 }
2276 
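/* Dump PTP state: the enable flags, Rx/Tx timestamp statistics, and the
 * software configuration alongside the hardware configuration read back
 * from firmware.
 */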
2277 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2278 {
2279 	struct hclge_ptp *ptp = hdev->ptp;
2280 	u32 sw_cfg = ptp->ptp_cfg;
2281 	unsigned int tx_start;
2282 	unsigned int last_rx;
2283 	int pos = 0;
2284 	u32 hw_cfg;
2285 	int ret;
2286 
2287 	pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2288 			 ptp->info.name);
2289 	pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2290 			 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2291 			 "yes" : "no");
2292 	pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2293 			 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2294 			 "yes" : "no");
2295 	pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2296 			 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2297 			 "yes" : "no");
2298 
2299 	last_rx = jiffies_to_msecs(ptp->last_rx);
	pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%03lu\n",
			 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2302 	pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2303 
2304 	tx_start = jiffies_to_msecs(ptp->tx_start);
	pos += scnprintf(buf + pos, len - pos,
			 "last tx start time: %lu.%03lu\n",
			 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2307 	pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2308 	pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2309 			 ptp->tx_skipped);
2310 	pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2311 			 ptp->tx_timeout);
2312 	pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2313 			 ptp->last_tx_seqid);
2314 
2315 	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2316 	if (ret)
2317 		return ret;
2318 
2319 	pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2320 			 sw_cfg, hw_cfg);
2321 
2322 	pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2323 			 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2324 
2325 	return 0;
2326 }
2327 
2328 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2329 {
2330 	hclge_dbg_dump_mac_list(hdev, buf, len, true);
2331 
2332 	return 0;
2333 }
2334 
2335 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2336 {
2337 	hclge_dbg_dump_mac_list(hdev, buf, len, false);
2338 
2339 	return 0;
2340 }
2341 
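/* Map each debugfs command to its dump handler. An entry provides either
 * a plain dbg_dump callback or, for register dumps that also need the
 * command id, a dbg_dump_reg callback.
 */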
2342 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2343 	{
2344 		.cmd = HNAE3_DBG_CMD_TM_NODES,
2345 		.dbg_dump = hclge_dbg_dump_tm_nodes,
2346 	},
2347 	{
2348 		.cmd = HNAE3_DBG_CMD_TM_PRI,
2349 		.dbg_dump = hclge_dbg_dump_tm_pri,
2350 	},
2351 	{
2352 		.cmd = HNAE3_DBG_CMD_TM_QSET,
2353 		.dbg_dump = hclge_dbg_dump_tm_qset,
2354 	},
2355 	{
2356 		.cmd = HNAE3_DBG_CMD_TM_MAP,
2357 		.dbg_dump = hclge_dbg_dump_tm_map,
2358 	},
2359 	{
2360 		.cmd = HNAE3_DBG_CMD_TM_PG,
2361 		.dbg_dump = hclge_dbg_dump_tm_pg,
2362 	},
2363 	{
2364 		.cmd = HNAE3_DBG_CMD_TM_PORT,
2365 		.dbg_dump = hclge_dbg_dump_tm_port,
2366 	},
2367 	{
2368 		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2369 		.dbg_dump = hclge_dbg_dump_tc,
2370 	},
2371 	{
2372 		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2373 		.dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2374 	},
2375 	{
2376 		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2377 		.dbg_dump = hclge_dbg_dump_qos_pri_map,
2378 	},
2379 	{
2380 		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2381 		.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2382 	},
2383 	{
2384 		.cmd = HNAE3_DBG_CMD_MAC_UC,
2385 		.dbg_dump = hclge_dbg_dump_mac_uc,
2386 	},
2387 	{
2388 		.cmd = HNAE3_DBG_CMD_MAC_MC,
2389 		.dbg_dump = hclge_dbg_dump_mac_mc,
2390 	},
2391 	{
2392 		.cmd = HNAE3_DBG_CMD_MNG_TBL,
2393 		.dbg_dump = hclge_dbg_dump_mng_table,
2394 	},
2395 	{
2396 		.cmd = HNAE3_DBG_CMD_LOOPBACK,
2397 		.dbg_dump = hclge_dbg_dump_loopback,
2398 	},
2399 	{
2400 		.cmd = HNAE3_DBG_CMD_PTP_INFO,
2401 		.dbg_dump = hclge_dbg_dump_ptp_info,
2402 	},
2403 	{
2404 		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2405 		.dbg_dump = hclge_dbg_dump_interrupt,
2406 	},
2407 	{
2408 		.cmd = HNAE3_DBG_CMD_RESET_INFO,
2409 		.dbg_dump = hclge_dbg_dump_rst_info,
2410 	},
2411 	{
2412 		.cmd = HNAE3_DBG_CMD_IMP_INFO,
2413 		.dbg_dump = hclge_dbg_get_imp_stats_info,
2414 	},
2415 	{
2416 		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2417 		.dbg_dump = hclge_dbg_dump_ncl_config,
2418 	},
2419 	{
2420 		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2421 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2422 	},
2423 	{
2424 		.cmd = HNAE3_DBG_CMD_REG_SSU,
2425 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2426 	},
2427 	{
2428 		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2429 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2430 	},
2431 	{
2432 		.cmd = HNAE3_DBG_CMD_REG_RPU,
2433 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2434 	},
2435 	{
2436 		.cmd = HNAE3_DBG_CMD_REG_NCSI,
2437 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2438 	},
2439 	{
2440 		.cmd = HNAE3_DBG_CMD_REG_RTC,
2441 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2442 	},
2443 	{
2444 		.cmd = HNAE3_DBG_CMD_REG_PPP,
2445 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2446 	},
2447 	{
2448 		.cmd = HNAE3_DBG_CMD_REG_RCB,
2449 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2450 	},
2451 	{
2452 		.cmd = HNAE3_DBG_CMD_REG_TQP,
2453 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2454 	},
2455 	{
2456 		.cmd = HNAE3_DBG_CMD_REG_MAC,
2457 		.dbg_dump = hclge_dbg_dump_mac,
2458 	},
2459 	{
2460 		.cmd = HNAE3_DBG_CMD_REG_DCB,
2461 		.dbg_dump = hclge_dbg_dump_dcb,
2462 	},
2463 	{
2464 		.cmd = HNAE3_DBG_CMD_FD_TCAM,
2465 		.dbg_dump = hclge_dbg_dump_fd_tcam,
2466 	},
2467 	{
2468 		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2469 		.dbg_dump = hclge_dbg_dump_mac_tnl_status,
2470 	},
2471 	{
2472 		.cmd = HNAE3_DBG_CMD_SERV_INFO,
2473 		.dbg_dump = hclge_dbg_dump_serv_info,
2474 	},
2475 	{
2476 		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2477 		.dbg_dump = hclge_dbg_dump_vlan_config,
2478 	},
2479 	{
2480 		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
2481 		.dbg_dump = hclge_dbg_dump_fd_counter,
2482 	},
2483 	{
2484 		.cmd = HNAE3_DBG_CMD_UMV_INFO,
2485 		.dbg_dump = hclge_dbg_dump_umv_info,
2486 	},
2487 };
2488 
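/**
 * hclge_dbg_read_cmd - dispatch a debugfs read to the matching dump handler
 * @handle: hnae3 handle of the device
 * @cmd: debugfs command being read
 * @buf: buffer to fill with the dump output
 * @len: size of @buf
 *
 * Return: 0 on success, negative errno on failure or for an unknown command.
 */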
2489 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2490 		       char *buf, int len)
2491 {
2492 	struct hclge_vport *vport = hclge_get_vport(handle);
2493 	const struct hclge_dbg_func *cmd_func;
2494 	struct hclge_dev *hdev = vport->back;
2495 	u32 i;
2496 
2497 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2498 		if (cmd == hclge_dbg_cmd_func[i].cmd) {
2499 			cmd_func = &hclge_dbg_cmd_func[i];
2500 			if (cmd_func->dbg_dump)
2501 				return cmd_func->dbg_dump(hdev, buf, len);
2502 			else
2503 				return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2504 							      len);
2505 		}
2506 	}
2507 
2508 	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2509 	return -EINVAL;
2510 }
2511