1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3 
4 #include <linux/device.h>
5 
6 #include "hclge_debugfs.h"
7 #include "hclge_err.h"
8 #include "hclge_main.h"
9 #include "hclge_tm.h"
10 #include "hnae3.h"
11 
12 static const char * const state_str[] = { "off", "on" };
13 static const char * const hclge_mac_state_str[] = {
14 	"TO_ADD", "TO_DEL", "ACTIVE"
15 };
16 
17 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
18 	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
19 	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
20 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
21 		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
22 		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
23 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
24 	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
25 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
26 		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
27 		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
28 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
29 	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
30 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
31 		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
32 		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
33 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
34 	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
35 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
36 		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
37 		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
38 	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
39 	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
40 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
41 		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
42 		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
43 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
44 	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
45 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
46 		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
47 		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
48 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
49 	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
50 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
51 		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
52 		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
53 	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
54 	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
55 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
56 		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
57 		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
58 	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
59 	  .dfx_msg = &hclge_dbg_rtc_reg[0],
60 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
61 		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
62 		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
63 	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
64 	  .dfx_msg = &hclge_dbg_ppp_reg[0],
65 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
66 		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
67 		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
68 	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
69 	  .dfx_msg = &hclge_dbg_rcb_reg[0],
70 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
71 		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
72 		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
73 	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
74 	  .dfx_msg = &hclge_dbg_tqp_reg[0],
75 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
76 		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
77 		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
78 };
79 
80 static void hclge_dbg_fill_content(char *content, u16 len,
81 				   const struct hclge_dbg_item *items,
82 				   const char **result, u16 size)
83 {
84 	char *pos = content;
85 	u16 i;
86 
87 	memset(content, ' ', len);
88 	for (i = 0; i < size; i++) {
89 		if (result)
90 			strncpy(pos, result[i], strlen(result[i]));
91 		else
92 			strncpy(pos, items[i].name, strlen(items[i].name));
93 		pos += strlen(items[i].name) + items[i].interval;
94 	}
95 	*pos++ = '\n';
96 	*pos++ = '\0';
97 }
98 
99 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
100 {
101 	if (id)
102 		sprintf(buf, "vf%u", id - 1);
103 	else
104 		sprintf(buf, "pf");
105 
106 	return buf;
107 }
108 
109 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
110 				    u32 *bd_num)
111 {
112 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
113 	int entries_per_desc;
114 	int index;
115 	int ret;
116 
117 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
118 	if (ret) {
119 		dev_err(&hdev->pdev->dev,
120 			"failed to get dfx bd_num, offset = %d, ret = %d\n",
121 			offset, ret);
122 		return ret;
123 	}
124 
125 	entries_per_desc = ARRAY_SIZE(desc[0].data);
126 	index = offset % entries_per_desc;
127 
128 	*bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
129 	if (!(*bd_num)) {
130 		dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
131 		return -EINVAL;
132 	}
133 
134 	return 0;
135 }
136 
137 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
138 			      struct hclge_desc *desc_src,
139 			      int index, int bd_num,
140 			      enum hclge_opcode_type cmd)
141 {
142 	struct hclge_desc *desc = desc_src;
143 	int ret, i;
144 
145 	hclge_cmd_setup_basic_desc(desc, cmd, true);
146 	desc->data[0] = cpu_to_le32(index);
147 
148 	for (i = 1; i < bd_num; i++) {
149 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
150 		desc++;
151 		hclge_cmd_setup_basic_desc(desc, cmd, true);
152 	}
153 
154 	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
155 	if (ret)
156 		dev_err(&hdev->pdev->dev,
157 			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
158 	return ret;
159 }
160 
161 static int
162 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
163 		       const struct hclge_dbg_reg_type_info *reg_info,
164 		       char *buf, int len, int *pos)
165 {
166 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
167 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
168 	struct hclge_desc *desc_src;
169 	u32 index, entry, i, cnt;
170 	int bd_num, min_num, ret;
171 	struct hclge_desc *desc;
172 
173 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
174 	if (ret)
175 		return ret;
176 
177 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
178 	if (!desc_src)
179 		return -ENOMEM;
180 
181 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
182 
183 	for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
184 		*pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
185 				  cnt++, dfx_message->message);
186 
187 	for (i = 0; i < cnt; i++)
188 		*pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
189 
190 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
191 
192 	for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
193 		dfx_message = reg_info->dfx_msg;
194 		desc = desc_src;
195 		ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
196 					 reg_msg->cmd);
197 		if (ret)
198 			break;
199 
200 		for (i = 0; i < min_num; i++, dfx_message++) {
201 			entry = i % HCLGE_DESC_DATA_LEN;
202 			if (i > 0 && !entry)
203 				desc++;
204 
205 			*pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
206 					  le32_to_cpu(desc->data[entry]));
207 		}
208 		*pos += scnprintf(buf + *pos, len - *pos, "\n");
209 	}
210 
211 	kfree(desc_src);
212 	return ret;
213 }
214 
215 static int
216 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
217 			  const struct hclge_dbg_reg_type_info *reg_info,
218 			  char *buf, int len, int *pos)
219 {
220 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
221 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
222 	struct hclge_desc *desc_src;
223 	int bd_num, min_num, ret;
224 	struct hclge_desc *desc;
225 	u32 entry, i;
226 
227 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
228 	if (ret)
229 		return ret;
230 
231 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
232 	if (!desc_src)
233 		return -ENOMEM;
234 
235 	desc = desc_src;
236 
237 	ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
238 	if (ret) {
239 		kfree(desc);
240 		return ret;
241 	}
242 
243 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
244 
245 	for (i = 0; i < min_num; i++, dfx_message++) {
246 		entry = i % HCLGE_DESC_DATA_LEN;
247 		if (i > 0 && !entry)
248 			desc++;
249 		if (!dfx_message->flag)
250 			continue;
251 
252 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
253 				  dfx_message->message,
254 				  le32_to_cpu(desc->data[entry]));
255 	}
256 
257 	kfree(desc_src);
258 	return 0;
259 }
260 
261 static int  hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
262 					     int len, int *pos)
263 {
264 	struct hclge_config_mac_mode_cmd *req;
265 	struct hclge_desc desc;
266 	u32 loop_en;
267 	int ret;
268 
269 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
270 
271 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
272 	if (ret) {
273 		dev_err(&hdev->pdev->dev,
274 			"failed to dump mac enable status, ret = %d\n", ret);
275 		return ret;
276 	}
277 
278 	req = (struct hclge_config_mac_mode_cmd *)desc.data;
279 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
280 
281 	*pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n",
282 			  hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
283 	*pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n",
284 			  hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
285 	*pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n",
286 			  hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
287 	*pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n",
288 			  hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
289 	*pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n",
290 			  hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
291 	*pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n",
292 			  hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
293 	*pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n",
294 			  hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
295 	*pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n",
296 			  hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
297 	*pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n",
298 			  hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
299 	*pos += scnprintf(buf + *pos, len - *pos,
300 			  "mac_rx_oversize_truncate_en: %#x\n",
301 			  hnae3_get_bit(loop_en,
302 					HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
303 	*pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n",
304 			  hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
305 	*pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n",
306 			  hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
307 	*pos += scnprintf(buf + *pos, len - *pos,
308 			  "mac_tx_under_min_err_en: %#x\n",
309 			  hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
310 	*pos += scnprintf(buf + *pos, len - *pos,
311 			  "mac_tx_oversize_truncate_en: %#x\n",
312 			  hnae3_get_bit(loop_en,
313 					HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
314 
315 	return 0;
316 }
317 
318 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
319 					 int len, int *pos)
320 {
321 	struct hclge_config_max_frm_size_cmd *req;
322 	struct hclge_desc desc;
323 	int ret;
324 
325 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
326 
327 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
328 	if (ret) {
329 		dev_err(&hdev->pdev->dev,
330 			"failed to dump mac frame size, ret = %d\n", ret);
331 		return ret;
332 	}
333 
334 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
335 
336 	*pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
337 			  le16_to_cpu(req->max_frm_size));
338 	*pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
339 			  req->min_frm_size);
340 
341 	return 0;
342 }
343 
344 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
345 					   int len, int *pos)
346 {
347 #define HCLGE_MAC_SPEED_SHIFT	0
348 #define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
349 #define HCLGE_MAC_DUPLEX_SHIFT	7
350 
351 	struct hclge_config_mac_speed_dup_cmd *req;
352 	struct hclge_desc desc;
353 	int ret;
354 
355 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
356 
357 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
358 	if (ret) {
359 		dev_err(&hdev->pdev->dev,
360 			"failed to dump mac speed duplex, ret = %d\n", ret);
361 		return ret;
362 	}
363 
364 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
365 
366 	*pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
367 			  hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
368 					  HCLGE_MAC_SPEED_SHIFT));
369 	*pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
370 			  hnae3_get_bit(req->speed_dup,
371 					HCLGE_MAC_DUPLEX_SHIFT));
372 	return 0;
373 }
374 
375 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
376 {
377 	int pos = 0;
378 	int ret;
379 
380 	ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
381 	if (ret)
382 		return ret;
383 
384 	ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
385 	if (ret)
386 		return ret;
387 
388 	return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
389 }
390 
391 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
392 				   int *pos)
393 {
394 	struct hclge_dbg_bitmap_cmd req;
395 	struct hclge_desc desc;
396 	u16 qset_id, qset_num;
397 	int ret;
398 
399 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
400 	if (ret)
401 		return ret;
402 
403 	*pos += scnprintf(buf + *pos, len - *pos,
404 			  "qset_id  roce_qset_mask  nic_qset_mask  qset_shaping_pass  qset_bp_status\n");
405 	for (qset_id = 0; qset_id < qset_num; qset_id++) {
406 		ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
407 					 HCLGE_OPC_QSET_DFX_STS);
408 		if (ret)
409 			return ret;
410 
411 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
412 
413 		*pos += scnprintf(buf + *pos, len - *pos,
414 				  "%04u           %#x            %#x             %#x               %#x\n",
415 				  qset_id, req.bit0, req.bit1, req.bit2,
416 				  req.bit3);
417 	}
418 
419 	return 0;
420 }
421 
422 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
423 				  int *pos)
424 {
425 	struct hclge_dbg_bitmap_cmd req;
426 	struct hclge_desc desc;
427 	u8 pri_id, pri_num;
428 	int ret;
429 
430 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
431 	if (ret)
432 		return ret;
433 
434 	*pos += scnprintf(buf + *pos, len - *pos,
435 			  "pri_id  pri_mask  pri_cshaping_pass  pri_pshaping_pass\n");
436 	for (pri_id = 0; pri_id < pri_num; pri_id++) {
437 		ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
438 					 HCLGE_OPC_PRI_DFX_STS);
439 		if (ret)
440 			return ret;
441 
442 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
443 
444 		*pos += scnprintf(buf + *pos, len - *pos,
445 				  "%03u       %#x           %#x                %#x\n",
446 				  pri_id, req.bit0, req.bit1, req.bit2);
447 	}
448 
449 	return 0;
450 }
451 
452 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
453 				 int *pos)
454 {
455 	struct hclge_dbg_bitmap_cmd req;
456 	struct hclge_desc desc;
457 	u8 pg_id;
458 	int ret;
459 
460 	*pos += scnprintf(buf + *pos, len - *pos,
461 			  "pg_id  pg_mask  pg_cshaping_pass  pg_pshaping_pass\n");
462 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
463 		ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
464 					 HCLGE_OPC_PG_DFX_STS);
465 		if (ret)
466 			return ret;
467 
468 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
469 
470 		*pos += scnprintf(buf + *pos, len - *pos,
471 				  "%03u      %#x           %#x               %#x\n",
472 				  pg_id, req.bit0, req.bit1, req.bit2);
473 	}
474 
475 	return 0;
476 }
477 
478 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
479 				    int *pos)
480 {
481 	struct hclge_desc desc;
482 	u16 nq_id;
483 	int ret;
484 
485 	*pos += scnprintf(buf + *pos, len - *pos,
486 			  "nq_id  sch_nic_queue_cnt  sch_roce_queue_cnt\n");
487 	for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
488 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
489 					 HCLGE_OPC_SCH_NQ_CNT);
490 		if (ret)
491 			return ret;
492 
493 		*pos += scnprintf(buf + *pos, len - *pos, "%04u           %#x",
494 				  nq_id, le32_to_cpu(desc.data[1]));
495 
496 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
497 					 HCLGE_OPC_SCH_RQ_CNT);
498 		if (ret)
499 			return ret;
500 
501 		*pos += scnprintf(buf + *pos, len - *pos,
502 				  "               %#x\n",
503 				  le32_to_cpu(desc.data[1]));
504 	}
505 
506 	return 0;
507 }
508 
509 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
510 				   int *pos)
511 {
512 	struct hclge_dbg_bitmap_cmd req;
513 	struct hclge_desc desc;
514 	u8 port_id = 0;
515 	int ret;
516 
517 	ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
518 				 HCLGE_OPC_PORT_DFX_STS);
519 	if (ret)
520 		return ret;
521 
522 	req.bitmap = (u8)le32_to_cpu(desc.data[1]);
523 
524 	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
525 			 req.bit0);
526 	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
527 			 req.bit1);
528 
529 	return 0;
530 }
531 
532 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
533 				 int *pos)
534 {
535 	struct hclge_desc desc[2];
536 	u8 port_id = 0;
537 	int ret;
538 
539 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
540 				 HCLGE_OPC_TM_INTERNAL_CNT);
541 	if (ret)
542 		return ret;
543 
544 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
545 			  le32_to_cpu(desc[0].data[1]));
546 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
547 			  le32_to_cpu(desc[0].data[2]));
548 
549 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
550 				 HCLGE_OPC_TM_INTERNAL_STS);
551 	if (ret)
552 		return ret;
553 
554 	*pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
555 			  le32_to_cpu(desc[0].data[1]));
556 	*pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
557 			  le32_to_cpu(desc[0].data[2]));
558 	*pos += scnprintf(buf + *pos, len - *pos,
559 			  "sch_roce_fifo_afull_gap: %#x\n",
560 			  le32_to_cpu(desc[0].data[3]));
561 	*pos += scnprintf(buf + *pos, len - *pos,
562 			  "tx_private_waterline: %#x\n",
563 			  le32_to_cpu(desc[0].data[4]));
564 	*pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
565 			  le32_to_cpu(desc[0].data[5]));
566 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
567 			  le32_to_cpu(desc[1].data[0]));
568 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
569 			  le32_to_cpu(desc[1].data[1]));
570 
571 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
572 		return 0;
573 
574 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
575 				 HCLGE_OPC_TM_INTERNAL_STS_1);
576 	if (ret)
577 		return ret;
578 
579 	*pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
580 			  le32_to_cpu(desc[0].data[1]));
581 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
582 			  le32_to_cpu(desc[0].data[2]));
583 	*pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
584 			  le32_to_cpu(desc[0].data[3]));
585 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
586 			  le32_to_cpu(desc[0].data[4]));
587 	*pos += scnprintf(buf + *pos, len - *pos,
588 			  "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
589 			  le32_to_cpu(desc[0].data[5]));
590 
591 	return 0;
592 }
593 
594 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
595 {
596 	int pos = 0;
597 	int ret;
598 
599 	ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
600 	if (ret)
601 		return ret;
602 
603 	ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
604 	if (ret)
605 		return ret;
606 
607 	ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
608 	if (ret)
609 		return ret;
610 
611 	ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
612 	if (ret)
613 		return ret;
614 
615 	ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
616 	if (ret)
617 		return ret;
618 
619 	return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
620 }
621 
622 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
623 				  enum hnae3_dbg_cmd cmd, char *buf, int len)
624 {
625 	const struct hclge_dbg_reg_type_info *reg_info;
626 	int pos = 0, ret = 0;
627 	int i;
628 
629 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
630 		reg_info = &hclge_dbg_reg_info[i];
631 		if (cmd == reg_info->cmd) {
632 			if (cmd == HNAE3_DBG_CMD_REG_TQP)
633 				return hclge_dbg_dump_reg_tqp(hdev, reg_info,
634 							      buf, len, &pos);
635 
636 			ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
637 							len, &pos);
638 			if (ret)
639 				break;
640 		}
641 	}
642 
643 	return ret;
644 }
645 
646 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
647 {
648 	struct hclge_ets_tc_weight_cmd *ets_weight;
649 	struct hclge_desc desc;
650 	char *sch_mode_str;
651 	int pos = 0;
652 	int ret;
653 	u8 i;
654 
655 	if (!hnae3_dev_dcb_supported(hdev)) {
656 		dev_err(&hdev->pdev->dev,
657 			"Only DCB-supported dev supports tc\n");
658 		return -EOPNOTSUPP;
659 	}
660 
661 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
662 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
663 	if (ret) {
664 		dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
665 			ret);
666 		return ret;
667 	}
668 
669 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
670 
671 	pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
672 			 hdev->tm_info.num_tc);
673 	pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
674 			 ets_weight->weight_offset);
675 
676 	pos += scnprintf(buf + pos, len - pos, "TC    MODE  WEIGHT\n");
677 	for (i = 0; i < HNAE3_MAX_TC; i++) {
678 		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
679 		pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
680 				 i, sch_mode_str,
681 				 hdev->tm_info.pg_info[0].tc_dwrr[i]);
682 	}
683 
684 	return 0;
685 }
686 
687 static const struct hclge_dbg_item tm_pg_items[] = {
688 	{ "ID", 2 },
689 	{ "PRI_MAP", 2 },
690 	{ "MODE", 2 },
691 	{ "DWRR", 2 },
692 	{ "C_IR_B", 2 },
693 	{ "C_IR_U", 2 },
694 	{ "C_IR_S", 2 },
695 	{ "C_BS_B", 2 },
696 	{ "C_BS_S", 2 },
697 	{ "C_FLAG", 2 },
698 	{ "C_RATE(Mbps)", 2 },
699 	{ "P_IR_B", 2 },
700 	{ "P_IR_U", 2 },
701 	{ "P_IR_S", 2 },
702 	{ "P_BS_B", 2 },
703 	{ "P_BS_S", 2 },
704 	{ "P_FLAG", 2 },
705 	{ "P_RATE(Mbps)", 0 }
706 };
707 
708 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
709 					  char **result, u8 *index)
710 {
711 	sprintf(result[(*index)++], "%3u", para->ir_b);
712 	sprintf(result[(*index)++], "%3u", para->ir_u);
713 	sprintf(result[(*index)++], "%3u", para->ir_s);
714 	sprintf(result[(*index)++], "%3u", para->bs_b);
715 	sprintf(result[(*index)++], "%3u", para->bs_s);
716 	sprintf(result[(*index)++], "%3u", para->flag);
717 	sprintf(result[(*index)++], "%6u", para->rate);
718 }
719 
720 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
721 				  char *buf, int len)
722 {
723 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
724 	char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
725 	u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
726 	char content[HCLGE_DBG_TM_INFO_LEN];
727 	int pos = 0;
728 	int ret;
729 
730 	for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
731 		result[i] = data_str;
732 		data_str += HCLGE_DBG_DATA_STR_LEN;
733 	}
734 
735 	hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
736 			       NULL, ARRAY_SIZE(tm_pg_items));
737 	pos += scnprintf(buf + pos, len - pos, "%s", content);
738 
739 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
740 		ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
741 		if (ret)
742 			return ret;
743 
744 		ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
745 		if (ret)
746 			return ret;
747 
748 		ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
749 		if (ret)
750 			return ret;
751 
752 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
753 					     HCLGE_OPC_TM_PG_C_SHAPPING,
754 					     &c_shaper_para);
755 		if (ret)
756 			return ret;
757 
758 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
759 					     HCLGE_OPC_TM_PG_P_SHAPPING,
760 					     &p_shaper_para);
761 		if (ret)
762 			return ret;
763 
764 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
765 				       "sp";
766 
767 		j = 0;
768 		sprintf(result[j++], "%02u", pg_id);
769 		sprintf(result[j++], "0x%02x", pri_bit_map);
770 		sprintf(result[j++], "%4s", sch_mode_str);
771 		sprintf(result[j++], "%3u", weight);
772 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
773 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
774 
775 		hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
776 				       (const char **)result,
777 				       ARRAY_SIZE(tm_pg_items));
778 		pos += scnprintf(buf + pos, len - pos, "%s", content);
779 	}
780 
781 	return 0;
782 }
783 
784 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
785 {
786 	char *data_str;
787 	int ret;
788 
789 	data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
790 			   HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
791 
792 	if (!data_str)
793 		return -ENOMEM;
794 
795 	ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
796 
797 	kfree(data_str);
798 
799 	return ret;
800 }
801 
802 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev,  char *buf, int len)
803 {
804 	struct hclge_tm_shaper_para shaper_para;
805 	int pos = 0;
806 	int ret;
807 
808 	ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
809 	if (ret)
810 		return ret;
811 
812 	pos += scnprintf(buf + pos, len - pos,
813 			 "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
814 	pos += scnprintf(buf + pos, len - pos,
815 			 "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
816 			 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
817 			 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
818 			 shaper_para.rate);
819 
820 	return 0;
821 }
822 
823 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
824 					 char *buf, int len)
825 {
826 	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
827 	struct hclge_bp_to_qs_map_cmd *map;
828 	struct hclge_desc desc;
829 	int pos = 0;
830 	u8 group_id;
831 	u8 grp_num;
832 	u16 i = 0;
833 	int ret;
834 
835 	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
836 		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
837 	map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
838 	for (group_id = 0; group_id < grp_num; group_id++) {
839 		hclge_cmd_setup_basic_desc(&desc,
840 					   HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
841 					   true);
842 		map->tc_id = tc_id;
843 		map->qs_group_id = group_id;
844 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
845 		if (ret) {
846 			dev_err(&hdev->pdev->dev,
847 				"failed to get bp to qset map, ret = %d\n",
848 				ret);
849 			return ret;
850 		}
851 
852 		qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
853 	}
854 
855 	pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
856 	for (group_id = 0; group_id < grp_num / 8; group_id++) {
857 		pos += scnprintf(buf + pos, len - pos,
858 			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
859 			 group_id * 256, qset_mapping[i + 7],
860 			 qset_mapping[i + 6], qset_mapping[i + 5],
861 			 qset_mapping[i + 4], qset_mapping[i + 3],
862 			 qset_mapping[i + 2], qset_mapping[i + 1],
863 			 qset_mapping[i]);
864 		i += 8;
865 	}
866 
867 	return pos;
868 }
869 
870 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
871 {
872 	u16 queue_id;
873 	u16 qset_id;
874 	u8 link_vld;
875 	int pos = 0;
876 	u8 pri_id;
877 	u8 tc_id;
878 	int ret;
879 
880 	for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
881 		ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
882 		if (ret)
883 			return ret;
884 
885 		ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
886 						&link_vld);
887 		if (ret)
888 			return ret;
889 
890 		ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
891 		if (ret)
892 			return ret;
893 
894 		pos += scnprintf(buf + pos, len - pos,
895 				 "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
896 		pos += scnprintf(buf + pos, len - pos,
897 				 "%04u        %4u       %3u      %2u\n",
898 				 queue_id, qset_id, pri_id, tc_id);
899 
900 		if (!hnae3_dev_dcb_supported(hdev))
901 			continue;
902 
903 		ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
904 						    len - pos);
905 		if (ret < 0)
906 			return ret;
907 		pos += ret;
908 
909 		pos += scnprintf(buf + pos, len - pos, "\n");
910 	}
911 
912 	return 0;
913 }
914 
915 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
916 {
917 	struct hclge_tm_nodes_cmd *nodes;
918 	struct hclge_desc desc;
919 	int pos = 0;
920 	int ret;
921 
922 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
923 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
924 	if (ret) {
925 		dev_err(&hdev->pdev->dev,
926 			"failed to dump tm nodes, ret = %d\n", ret);
927 		return ret;
928 	}
929 
930 	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
931 
932 	pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
933 	pos += scnprintf(buf + pos, len - pos, "PG      %4u      %4u\n",
934 			 nodes->pg_base_id, nodes->pg_num);
935 	pos += scnprintf(buf + pos, len - pos, "PRI     %4u      %4u\n",
936 			 nodes->pri_base_id, nodes->pri_num);
937 	pos += scnprintf(buf + pos, len - pos, "QSET    %4u      %4u\n",
938 			 le16_to_cpu(nodes->qset_base_id),
939 			 le16_to_cpu(nodes->qset_num));
940 	pos += scnprintf(buf + pos, len - pos, "QUEUE   %4u      %4u\n",
941 			 le16_to_cpu(nodes->queue_base_id),
942 			 le16_to_cpu(nodes->queue_num));
943 
944 	return 0;
945 }
946 
947 static const struct hclge_dbg_item tm_pri_items[] = {
948 	{ "ID", 4 },
949 	{ "MODE", 2 },
950 	{ "DWRR", 2 },
951 	{ "C_IR_B", 2 },
952 	{ "C_IR_U", 2 },
953 	{ "C_IR_S", 2 },
954 	{ "C_BS_B", 2 },
955 	{ "C_BS_S", 2 },
956 	{ "C_FLAG", 2 },
957 	{ "C_RATE(Mbps)", 2 },
958 	{ "P_IR_B", 2 },
959 	{ "P_IR_U", 2 },
960 	{ "P_IR_S", 2 },
961 	{ "P_BS_B", 2 },
962 	{ "P_BS_S", 2 },
963 	{ "P_FLAG", 2 },
964 	{ "P_RATE(Mbps)", 0 }
965 };
966 
967 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
968 {
969 	char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
970 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
971 	char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
972 	char content[HCLGE_DBG_TM_INFO_LEN];
973 	u8 pri_num, sch_mode, weight, i, j;
974 	int pos, ret;
975 
976 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
977 	if (ret)
978 		return ret;
979 
980 	for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
981 		result[i] = &data_str[i][0];
982 
983 	hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
984 			       NULL, ARRAY_SIZE(tm_pri_items));
985 	pos = scnprintf(buf, len, "%s", content);
986 
987 	for (i = 0; i < pri_num; i++) {
988 		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
989 		if (ret)
990 			return ret;
991 
992 		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
993 		if (ret)
994 			return ret;
995 
996 		ret = hclge_tm_get_pri_shaper(hdev, i,
997 					      HCLGE_OPC_TM_PRI_C_SHAPPING,
998 					      &c_shaper_para);
999 		if (ret)
1000 			return ret;
1001 
1002 		ret = hclge_tm_get_pri_shaper(hdev, i,
1003 					      HCLGE_OPC_TM_PRI_P_SHAPPING,
1004 					      &p_shaper_para);
1005 		if (ret)
1006 			return ret;
1007 
1008 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1009 			       "sp";
1010 
1011 		j = 0;
1012 		sprintf(result[j++], "%04u", i);
1013 		sprintf(result[j++], "%4s", sch_mode_str);
1014 		sprintf(result[j++], "%3u", weight);
1015 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1016 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1017 		hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1018 				       (const char **)result,
1019 				       ARRAY_SIZE(tm_pri_items));
1020 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1021 	}
1022 
1023 	return 0;
1024 }
1025 
1026 static const struct hclge_dbg_item tm_qset_items[] = {
1027 	{ "ID", 4 },
1028 	{ "MAP_PRI", 2 },
1029 	{ "LINK_VLD", 2 },
1030 	{ "MODE", 2 },
1031 	{ "DWRR", 2 },
1032 	{ "IR_B", 2 },
1033 	{ "IR_U", 2 },
1034 	{ "IR_S", 2 },
1035 	{ "BS_B", 2 },
1036 	{ "BS_S", 2 },
1037 	{ "FLAG", 2 },
1038 	{ "RATE(Mbps)", 0 }
1039 };
1040 
1041 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1042 {
1043 	char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1044 	char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1045 	u8 priority, link_vld, sch_mode, weight;
1046 	struct hclge_tm_shaper_para shaper_para;
1047 	char content[HCLGE_DBG_TM_INFO_LEN];
1048 	u16 qset_num, i;
1049 	int ret, pos;
1050 	u8 j;
1051 
1052 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
1053 	if (ret)
1054 		return ret;
1055 
1056 	for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1057 		result[i] = &data_str[i][0];
1058 
1059 	hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1060 			       NULL, ARRAY_SIZE(tm_qset_items));
1061 	pos = scnprintf(buf, len, "%s", content);
1062 
1063 	for (i = 0; i < qset_num; i++) {
1064 		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1065 		if (ret)
1066 			return ret;
1067 
1068 		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1069 		if (ret)
1070 			return ret;
1071 
1072 		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1073 		if (ret)
1074 			return ret;
1075 
1076 		ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1077 		if (ret)
1078 			return ret;
1079 
1080 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1081 			       "sp";
1082 
1083 		j = 0;
1084 		sprintf(result[j++], "%04u", i);
1085 		sprintf(result[j++], "%4u", priority);
1086 		sprintf(result[j++], "%4u", link_vld);
1087 		sprintf(result[j++], "%4s", sch_mode_str);
1088 		sprintf(result[j++], "%3u", weight);
1089 		hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1090 
1091 		hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1092 				       (const char **)result,
1093 				       ARRAY_SIZE(tm_qset_items));
1094 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1095 	}
1096 
1097 	return 0;
1098 }
1099 
1100 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1101 					int len)
1102 {
1103 	struct hclge_cfg_pause_param_cmd *pause_param;
1104 	struct hclge_desc desc;
1105 	int pos = 0;
1106 	int ret;
1107 
1108 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1109 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1110 	if (ret) {
1111 		dev_err(&hdev->pdev->dev,
1112 			"failed to dump qos pause, ret = %d\n", ret);
1113 		return ret;
1114 	}
1115 
1116 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1117 
1118 	pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1119 			 pause_param->pause_trans_gap);
1120 	pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1121 			 le16_to_cpu(pause_param->pause_trans_time));
1122 	return 0;
1123 }
1124 
1125 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1126 				      int len)
1127 {
1128 #define HCLGE_DBG_TC_MASK		0x0F
1129 #define HCLGE_DBG_TC_BIT_WIDTH		4
1130 
1131 	struct hclge_qos_pri_map_cmd *pri_map;
1132 	struct hclge_desc desc;
1133 	int pos = 0;
1134 	u8 *pri_tc;
1135 	u8 tc, i;
1136 	int ret;
1137 
1138 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1139 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1140 	if (ret) {
1141 		dev_err(&hdev->pdev->dev,
1142 			"failed to dump qos pri map, ret = %d\n", ret);
1143 		return ret;
1144 	}
1145 
1146 	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1147 
1148 	pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1149 			 pri_map->vlan_pri);
1150 	pos += scnprintf(buf + pos, len - pos, "PRI  TC\n");
1151 
1152 	pri_tc = (u8 *)pri_map;
1153 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1154 		tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1155 		tc &= HCLGE_DBG_TC_MASK;
1156 		pos += scnprintf(buf + pos, len - pos, "%u     %u\n", i, tc);
1157 	}
1158 
1159 	return 0;
1160 }
1161 
1162 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1163 {
1164 	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1165 	struct hclge_desc desc;
1166 	int pos = 0;
1167 	int i, ret;
1168 
1169 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1170 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1171 	if (ret) {
1172 		dev_err(&hdev->pdev->dev,
1173 			"failed to dump tx buf, ret = %d\n", ret);
1174 		return ret;
1175 	}
1176 
1177 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1178 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1179 		pos += scnprintf(buf + pos, len - pos,
1180 				 "tx_packet_buf_tc_%d: 0x%x\n", i,
1181 				 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1182 
1183 	return pos;
1184 }
1185 
1186 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1187 					  int len)
1188 {
1189 	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1190 	struct hclge_desc desc;
1191 	int pos = 0;
1192 	int i, ret;
1193 
1194 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1195 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1196 	if (ret) {
1197 		dev_err(&hdev->pdev->dev,
1198 			"failed to dump rx priv buf, ret = %d\n", ret);
1199 		return ret;
1200 	}
1201 
1202 	pos += scnprintf(buf + pos, len - pos, "\n");
1203 
1204 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1205 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1206 		pos += scnprintf(buf + pos, len - pos,
1207 				 "rx_packet_buf_tc_%d: 0x%x\n", i,
1208 				 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1209 
1210 	pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1211 			 le16_to_cpu(rx_buf_cmd->shared_buf));
1212 
1213 	return pos;
1214 }
1215 
1216 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1217 					   int len)
1218 {
1219 	struct hclge_rx_com_wl *rx_com_wl;
1220 	struct hclge_desc desc;
1221 	int pos = 0;
1222 	int ret;
1223 
1224 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1225 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1226 	if (ret) {
1227 		dev_err(&hdev->pdev->dev,
1228 			"failed to dump rx common wl, ret = %d\n", ret);
1229 		return ret;
1230 	}
1231 
1232 	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1233 	pos += scnprintf(buf + pos, len - pos, "\n");
1234 	pos += scnprintf(buf + pos, len - pos,
1235 			 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1236 			 le16_to_cpu(rx_com_wl->com_wl.high),
1237 			 le16_to_cpu(rx_com_wl->com_wl.low));
1238 
1239 	return pos;
1240 }
1241 
1242 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1243 					    int len)
1244 {
1245 	struct hclge_rx_com_wl *rx_packet_cnt;
1246 	struct hclge_desc desc;
1247 	int pos = 0;
1248 	int ret;
1249 
1250 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1251 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1252 	if (ret) {
1253 		dev_err(&hdev->pdev->dev,
1254 			"failed to dump rx global pkt cnt, ret = %d\n", ret);
1255 		return ret;
1256 	}
1257 
1258 	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1259 	pos += scnprintf(buf + pos, len - pos,
1260 			 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1261 			 le16_to_cpu(rx_packet_cnt->com_wl.high),
1262 			 le16_to_cpu(rx_packet_cnt->com_wl.low));
1263 
1264 	return pos;
1265 }
1266 
1267 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1268 					     int len)
1269 {
1270 	struct hclge_rx_priv_wl_buf *rx_priv_wl;
1271 	struct hclge_desc desc[2];
1272 	int pos = 0;
1273 	int i, ret;
1274 
1275 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1276 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1277 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1278 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1279 	if (ret) {
1280 		dev_err(&hdev->pdev->dev,
1281 			"failed to dump rx priv wl buf, ret = %d\n", ret);
1282 		return ret;
1283 	}
1284 
1285 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1286 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1287 		pos += scnprintf(buf + pos, len - pos,
1288 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1289 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1290 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1291 
1292 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1293 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1294 		pos += scnprintf(buf + pos, len - pos,
1295 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1296 			 i + HCLGE_TC_NUM_ONE_DESC,
1297 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1298 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1299 
1300 	return pos;
1301 }
1302 
1303 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1304 						  char *buf, int len)
1305 {
1306 	struct hclge_rx_com_thrd *rx_com_thrd;
1307 	struct hclge_desc desc[2];
1308 	int pos = 0;
1309 	int i, ret;
1310 
1311 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1312 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1313 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1314 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1315 	if (ret) {
1316 		dev_err(&hdev->pdev->dev,
1317 			"failed to dump rx common threshold, ret = %d\n", ret);
1318 		return ret;
1319 	}
1320 
1321 	pos += scnprintf(buf + pos, len - pos, "\n");
1322 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1323 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1324 		pos += scnprintf(buf + pos, len - pos,
1325 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1326 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1327 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1328 
1329 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1330 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1331 		pos += scnprintf(buf + pos, len - pos,
1332 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1333 			 i + HCLGE_TC_NUM_ONE_DESC,
1334 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1335 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1336 
1337 	return pos;
1338 }
1339 
1340 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1341 				      int len)
1342 {
1343 	int pos = 0;
1344 	int ret;
1345 
1346 	ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1347 	if (ret < 0)
1348 		return ret;
1349 	pos += ret;
1350 
1351 	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1352 	if (ret < 0)
1353 		return ret;
1354 	pos += ret;
1355 
1356 	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1357 	if (ret < 0)
1358 		return ret;
1359 	pos += ret;
1360 
1361 	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1362 	if (ret < 0)
1363 		return ret;
1364 	pos += ret;
1365 
1366 	pos += scnprintf(buf + pos, len - pos, "\n");
1367 	if (!hnae3_dev_dcb_supported(hdev))
1368 		return 0;
1369 
1370 	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1371 	if (ret < 0)
1372 		return ret;
1373 	pos += ret;
1374 
1375 	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1376 						     len - pos);
1377 	if (ret < 0)
1378 		return ret;
1379 
1380 	return 0;
1381 }
1382 
1383 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1384 {
1385 	struct hclge_mac_ethertype_idx_rd_cmd *req0;
1386 	struct hclge_desc desc;
1387 	u32 msg_egress_port;
1388 	int pos = 0;
1389 	int ret, i;
1390 
1391 	pos += scnprintf(buf + pos, len - pos,
1392 			 "entry  mac_addr          mask  ether  ");
1393 	pos += scnprintf(buf + pos, len - pos,
1394 			 "mask  vlan  mask  i_map  i_dir  e_type  ");
1395 	pos += scnprintf(buf + pos, len - pos, "pf_id  vf_id  q_id  drop\n");
1396 
1397 	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1398 		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1399 					   true);
1400 		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1401 		req0->index = cpu_to_le16(i);
1402 
1403 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1404 		if (ret) {
1405 			dev_err(&hdev->pdev->dev,
1406 				"failed to dump manage table, ret = %d\n", ret);
1407 			return ret;
1408 		}
1409 
1410 		if (!req0->resp_code)
1411 			continue;
1412 
1413 		pos += scnprintf(buf + pos, len - pos, "%02u     %pM ",
1414 				 le16_to_cpu(req0->index), req0->mac_addr);
1415 
1416 		pos += scnprintf(buf + pos, len - pos,
1417 				 "%x     %04x   %x     %04x  ",
1418 				 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1419 				 le16_to_cpu(req0->ethter_type),
1420 				 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1421 				 le16_to_cpu(req0->vlan_tag) &
1422 				 HCLGE_DBG_MNG_VLAN_TAG);
1423 
1424 		pos += scnprintf(buf + pos, len - pos,
1425 				 "%x     %02x     %02x     ",
1426 				 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1427 				 req0->i_port_bitmap, req0->i_port_direction);
1428 
1429 		msg_egress_port = le16_to_cpu(req0->egress_port);
1430 		pos += scnprintf(buf + pos, len - pos,
1431 				 "%x       %x      %02x     %04x  %x\n",
1432 				 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1433 				 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1434 				 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1435 				 le16_to_cpu(req0->egress_queue),
1436 				 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1437 	}
1438 
1439 	return 0;
1440 }
1441 
1442 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1443 
1444 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1445 				  char *tcam_buf,
1446 				  struct hclge_dbg_tcam_msg tcam_msg)
1447 {
1448 	struct hclge_fd_tcam_config_1_cmd *req1;
1449 	struct hclge_fd_tcam_config_2_cmd *req2;
1450 	struct hclge_fd_tcam_config_3_cmd *req3;
1451 	struct hclge_desc desc[3];
1452 	int pos = 0;
1453 	int ret, i;
1454 	u32 *req;
1455 
1456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1457 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1458 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1459 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1460 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1461 
1462 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1463 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1464 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1465 
1466 	req1->stage  = tcam_msg.stage;
1467 	req1->xy_sel = sel_x ? 1 : 0;
1468 	req1->index  = cpu_to_le32(tcam_msg.loc);
1469 
1470 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
1471 	if (ret)
1472 		return ret;
1473 
1474 	pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1475 			 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1476 			 tcam_msg.loc);
1477 
1478 	/* tcam_data0 ~ tcam_data1 */
1479 	req = (u32 *)req1->tcam_data;
1480 	for (i = 0; i < 2; i++)
1481 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1482 				 "%08x\n", *req++);
1483 
1484 	/* tcam_data2 ~ tcam_data7 */
1485 	req = (u32 *)req2->tcam_data;
1486 	for (i = 0; i < 6; i++)
1487 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1488 				 "%08x\n", *req++);
1489 
1490 	/* tcam_data8 ~ tcam_data12 */
1491 	req = (u32 *)req3->tcam_data;
1492 	for (i = 0; i < 5; i++)
1493 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1494 				 "%08x\n", *req++);
1495 
1496 	return ret;
1497 }
1498 
1499 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1500 {
1501 	struct hclge_fd_rule *rule;
1502 	struct hlist_node *node;
1503 	int cnt = 0;
1504 
1505 	spin_lock_bh(&hdev->fd_rule_lock);
1506 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1507 		rule_locs[cnt] = rule->location;
1508 		cnt++;
1509 	}
1510 	spin_unlock_bh(&hdev->fd_rule_lock);
1511 
1512 	if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1513 		return -EINVAL;
1514 
1515 	return cnt;
1516 }
1517 
1518 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1519 {
1520 	u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1521 	struct hclge_dbg_tcam_msg tcam_msg;
1522 	int i, ret, rule_cnt;
1523 	u16 *rule_locs;
1524 	char *tcam_buf;
1525 	int pos = 0;
1526 
1527 	if (!hnae3_dev_fd_supported(hdev)) {
1528 		dev_err(&hdev->pdev->dev,
1529 			"Only FD-supported dev supports dump fd tcam\n");
1530 		return -EOPNOTSUPP;
1531 	}
1532 
1533 	if (!hdev->hclge_fd_rule_num || !rule_num)
1534 		return 0;
1535 
1536 	rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1537 	if (!rule_locs)
1538 		return -ENOMEM;
1539 
1540 	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1541 	if (!tcam_buf) {
1542 		kfree(rule_locs);
1543 		return -ENOMEM;
1544 	}
1545 
1546 	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1547 	if (rule_cnt < 0) {
1548 		ret = rule_cnt;
1549 		dev_err(&hdev->pdev->dev,
1550 			"failed to get rule number, ret = %d\n", ret);
1551 		goto out;
1552 	}
1553 
1554 	ret = 0;
1555 	for (i = 0; i < rule_cnt; i++) {
1556 		tcam_msg.stage = HCLGE_FD_STAGE_1;
1557 		tcam_msg.loc = rule_locs[i];
1558 
1559 		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1560 		if (ret) {
1561 			dev_err(&hdev->pdev->dev,
1562 				"failed to get fd tcam key x, ret = %d\n", ret);
1563 			goto out;
1564 		}
1565 
1566 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1567 
1568 		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1569 		if (ret) {
1570 			dev_err(&hdev->pdev->dev,
1571 				"failed to get fd tcam key y, ret = %d\n", ret);
1572 			goto out;
1573 		}
1574 
1575 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1576 	}
1577 
1578 out:
1579 	kfree(tcam_buf);
1580 	kfree(rule_locs);
1581 	return ret;
1582 }
1583 
1584 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1585 {
1586 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1587 	struct hclge_fd_ad_cnt_read_cmd *req;
1588 	char str_id[HCLGE_DBG_ID_LEN];
1589 	struct hclge_desc desc;
1590 	int pos = 0;
1591 	int ret;
1592 	u64 cnt;
1593 	u8 i;
1594 
1595 	pos += scnprintf(buf + pos, len - pos,
1596 			 "func_id\thit_times\n");
1597 
1598 	for (i = 0; i < func_num; i++) {
1599 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1600 		req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1601 		req->index = cpu_to_le16(i);
1602 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1603 		if (ret) {
1604 			dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1605 				ret);
1606 			return ret;
1607 		}
1608 		cnt = le64_to_cpu(req->cnt);
1609 		hclge_dbg_get_func_id_str(str_id, i);
1610 		pos += scnprintf(buf + pos, len - pos,
1611 				 "%s\t%llu\n", str_id, cnt);
1612 	}
1613 
1614 	return 0;
1615 }
1616 
1617 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1618 {
1619 	int pos = 0;
1620 
1621 	pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1622 			 hdev->rst_stats.pf_rst_cnt);
1623 	pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1624 			 hdev->rst_stats.flr_rst_cnt);
1625 	pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1626 			 hdev->rst_stats.global_rst_cnt);
1627 	pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1628 			 hdev->rst_stats.imp_rst_cnt);
1629 	pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1630 			 hdev->rst_stats.reset_done_cnt);
1631 	pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1632 			 hdev->rst_stats.hw_reset_done_cnt);
1633 	pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1634 			 hdev->rst_stats.reset_cnt);
1635 	pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1636 			 hdev->rst_stats.reset_fail_cnt);
1637 	pos += scnprintf(buf + pos, len - pos,
1638 			 "vector0 interrupt enable status: 0x%x\n",
1639 			 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
1640 	pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n",
1641 			 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
1642 	pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n",
1643 			 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
1644 	pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n",
1645 			 hclge_read_dev(&hdev->hw,
1646 					HCLGE_RAS_PF_OTHER_INT_STS_REG));
1647 	pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n",
1648 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
1649 	pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n",
1650 			 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
1651 	pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n",
1652 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
1653 	pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1654 			 hdev->state);
1655 
1656 	return 0;
1657 }
1658 
1659 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1660 {
1661 	unsigned long rem_nsec;
1662 	int pos = 0;
1663 	u64 lc;
1664 
1665 	lc = local_clock();
1666 	rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1667 
1668 	pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1669 			 (unsigned long)lc, rem_nsec / 1000);
1670 	pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1671 			 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1672 	pos += scnprintf(buf + pos, len - pos,
1673 			 "last_service_task_processed: %lu(jiffies)\n",
1674 			 hdev->last_serv_processed);
1675 	pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1676 			 hdev->serv_processed_cnt);
1677 
1678 	return 0;
1679 }
1680 
1681 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1682 {
1683 	int pos = 0;
1684 
1685 	pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1686 			 hdev->num_nic_msi);
1687 	pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1688 			 hdev->num_roce_msi);
1689 	pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1690 			 hdev->num_msi_used);
1691 	pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1692 			 hdev->num_msi_left);
1693 
1694 	return 0;
1695 }
1696 
1697 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1698 					  char *buf, int len, u32 bd_num)
1699 {
1700 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1701 
1702 	struct hclge_desc *desc_index = desc_src;
1703 	u32 offset = 0;
1704 	int pos = 0;
1705 	u32 i, j;
1706 
1707 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1708 
1709 	for (i = 0; i < bd_num; i++) {
1710 		j = 0;
1711 		while (j < HCLGE_DESC_DATA_LEN - 1) {
1712 			pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1713 					 offset);
1714 			pos += scnprintf(buf + pos, len - pos, "0x%08x  ",
1715 					 le32_to_cpu(desc_index->data[j++]));
1716 			pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1717 					 le32_to_cpu(desc_index->data[j++]));
1718 			offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1719 		}
1720 		desc_index++;
1721 	}
1722 }
1723 
1724 static int
1725 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1726 {
1727 	struct hclge_get_imp_bd_cmd *req;
1728 	struct hclge_desc *desc_src;
1729 	struct hclge_desc desc;
1730 	u32 bd_num;
1731 	int ret;
1732 
1733 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1734 
1735 	req = (struct hclge_get_imp_bd_cmd *)desc.data;
1736 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1737 	if (ret) {
1738 		dev_err(&hdev->pdev->dev,
1739 			"failed to get imp statistics bd number, ret = %d\n",
1740 			ret);
1741 		return ret;
1742 	}
1743 
1744 	bd_num = le32_to_cpu(req->bd_num);
1745 	if (!bd_num) {
1746 		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1747 		return -EINVAL;
1748 	}
1749 
1750 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1751 	if (!desc_src)
1752 		return -ENOMEM;
1753 
1754 	ret  = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
1755 				  HCLGE_OPC_IMP_STATS_INFO);
1756 	if (ret) {
1757 		kfree(desc_src);
1758 		dev_err(&hdev->pdev->dev,
1759 			"failed to get imp statistics, ret = %d\n", ret);
1760 		return ret;
1761 	}
1762 
1763 	hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1764 
1765 	kfree(desc_src);
1766 
1767 	return 0;
1768 }
1769 
1770 #define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
1771 #define HCLGE_MAX_NCL_CONFIG_LENGTH	16384
1772 
1773 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1774 					char *buf, int *len, int *pos)
1775 {
1776 #define HCLGE_CMD_DATA_NUM		6
1777 
1778 	int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1779 	int i, j;
1780 
1781 	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1782 		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1783 			if (i == 0 && j == 0)
1784 				continue;
1785 
1786 			*pos += scnprintf(buf + *pos, *len - *pos,
1787 					  "0x%04x | 0x%08x\n", offset,
1788 					  le32_to_cpu(desc[i].data[j]));
1789 
1790 			offset += sizeof(u32);
1791 			*index -= sizeof(u32);
1792 
1793 			if (*index <= 0)
1794 				return;
1795 		}
1796 	}
1797 }
1798 
1799 static int
1800 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1801 {
1802 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
1803 
1804 	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1805 	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1806 	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1807 	int pos = 0;
1808 	u32 data0;
1809 	int ret;
1810 
1811 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1812 
1813 	while (index > 0) {
1814 		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1815 		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1816 			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1817 		else
1818 			data0 |= (u32)index << 16;
1819 		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1820 					 HCLGE_OPC_QUERY_NCL_CONFIG);
1821 		if (ret)
1822 			return ret;
1823 
1824 		hclge_ncl_config_data_print(desc, &index, buf, &len, &pos);
1825 	}
1826 
1827 	return 0;
1828 }
1829 
1830 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1831 {
1832 	struct phy_device *phydev = hdev->hw.mac.phydev;
1833 	struct hclge_config_mac_mode_cmd *req_app;
1834 	struct hclge_common_lb_cmd *req_common;
1835 	struct hclge_desc desc;
1836 	u8 loopback_en;
1837 	int pos = 0;
1838 	int ret;
1839 
1840 	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1841 	req_common = (struct hclge_common_lb_cmd *)desc.data;
1842 
1843 	pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1844 			 hdev->hw.mac.mac_id);
1845 
1846 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1847 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1848 	if (ret) {
1849 		dev_err(&hdev->pdev->dev,
1850 			"failed to dump app loopback status, ret = %d\n", ret);
1851 		return ret;
1852 	}
1853 
1854 	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1855 				    HCLGE_MAC_APP_LP_B);
1856 	pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1857 			 state_str[loopback_en]);
1858 
1859 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1860 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1861 	if (ret) {
1862 		dev_err(&hdev->pdev->dev,
1863 			"failed to dump common loopback status, ret = %d\n",
1864 			ret);
1865 		return ret;
1866 	}
1867 
1868 	loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
1869 	pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1870 			 state_str[loopback_en]);
1871 
1872 	loopback_en = req_common->enable &
1873 			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1874 	pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1875 			 state_str[loopback_en]);
1876 
1877 	if (phydev) {
1878 		loopback_en = phydev->loopback_enabled;
1879 		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1880 				 state_str[loopback_en]);
1881 	} else if (hnae3_dev_phy_imp_supported(hdev)) {
1882 		loopback_en = req_common->enable &
1883 			      HCLGE_CMD_GE_PHY_INNER_LOOP_B;
1884 		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1885 				 state_str[loopback_en]);
1886 	}
1887 
1888 	return 0;
1889 }
1890 
1891 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
1892  * @hdev: pointer to struct hclge_dev
1893  */
1894 static int
1895 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1896 {
1897 	struct hclge_mac_tnl_stats stats;
1898 	unsigned long rem_nsec;
1899 	int pos = 0;
1900 
1901 	pos += scnprintf(buf + pos, len - pos,
1902 			 "Recently generated mac tnl interruption:\n");
1903 
1904 	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1905 		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1906 
1907 		pos += scnprintf(buf + pos, len - pos,
1908 				 "[%07lu.%03lu] status = 0x%x\n",
1909 				 (unsigned long)stats.time, rem_nsec / 1000,
1910 				 stats.status);
1911 	}
1912 
1913 	return 0;
1914 }
1915 
1916 
1917 static const struct hclge_dbg_item mac_list_items[] = {
1918 	{ "FUNC_ID", 2 },
1919 	{ "MAC_ADDR", 12 },
1920 	{ "STATE", 2 },
1921 };
1922 
1923 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
1924 				    bool is_unicast)
1925 {
1926 	char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
1927 	char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
1928 	char *result[ARRAY_SIZE(mac_list_items)];
1929 	struct hclge_mac_node *mac_node, *tmp;
1930 	struct hclge_vport *vport;
1931 	struct list_head *list;
1932 	u32 func_id;
1933 	int pos = 0;
1934 	int i;
1935 
1936 	for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
1937 		result[i] = &data_str[i][0];
1938 
1939 	pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
1940 			 is_unicast ? "UC" : "MC");
1941 	hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
1942 			       NULL, ARRAY_SIZE(mac_list_items));
1943 	pos += scnprintf(buf + pos, len - pos, "%s", content);
1944 
1945 	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
1946 		vport = &hdev->vport[func_id];
1947 		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
1948 		spin_lock_bh(&vport->mac_list_lock);
1949 		list_for_each_entry_safe(mac_node, tmp, list, node) {
1950 			i = 0;
1951 			result[i++] = hclge_dbg_get_func_id_str(str_id,
1952 								func_id);
1953 			sprintf(result[i++], "%pM", mac_node->mac_addr);
1954 			sprintf(result[i++], "%5s",
1955 				hclge_mac_state_str[mac_node->state]);
1956 			hclge_dbg_fill_content(content, sizeof(content),
1957 					       mac_list_items,
1958 					       (const char **)result,
1959 					       ARRAY_SIZE(mac_list_items));
1960 			pos += scnprintf(buf + pos, len - pos, "%s", content);
1961 		}
1962 		spin_unlock_bh(&vport->mac_list_lock);
1963 	}
1964 }
1965 
1966 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
1967 {
1968 	u8 func_num = pci_num_vf(hdev->pdev) + 1;
1969 	struct hclge_vport *vport;
1970 	int pos = 0;
1971 	u8 i;
1972 
1973 	pos += scnprintf(buf, len, "num_alloc_vport   : %u\n",
1974 			  hdev->num_alloc_vport);
1975 	pos += scnprintf(buf + pos, len - pos, "max_umv_size     : %u\n",
1976 			 hdev->max_umv_size);
1977 	pos += scnprintf(buf + pos, len - pos, "wanted_umv_size  : %u\n",
1978 			 hdev->wanted_umv_size);
1979 	pos += scnprintf(buf + pos, len - pos, "priv_umv_size    : %u\n",
1980 			 hdev->priv_umv_size);
1981 
1982 	mutex_lock(&hdev->vport_lock);
1983 	pos += scnprintf(buf + pos, len - pos, "share_umv_size   : %u\n",
1984 			 hdev->share_umv_size);
1985 	for (i = 0; i < func_num; i++) {
1986 		vport = &hdev->vport[i];
1987 		pos += scnprintf(buf + pos, len - pos,
1988 				 "vport(%u) used_umv_num : %u\n",
1989 				 i, vport->used_umv_num);
1990 	}
1991 	mutex_unlock(&hdev->vport_lock);
1992 
1993 	pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num  : %u\n",
1994 			 hdev->used_mc_mac_num);
1995 
1996 	return 0;
1997 }
1998 
1999 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2000 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2001 {
2002 	struct hclge_vport_vtag_rx_cfg_cmd *req;
2003 	struct hclge_desc desc;
2004 	u16 bmap_index;
2005 	u8 rx_cfg;
2006 	int ret;
2007 
2008 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2009 
2010 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
2011 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2012 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2013 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2014 
2015 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2016 	if (ret) {
2017 		dev_err(&hdev->pdev->dev,
2018 			"failed to get vport%u rxvlan cfg, ret = %d\n",
2019 			vf_id, ret);
2020 		return ret;
2021 	}
2022 
2023 	rx_cfg = req->vport_vlan_cfg;
2024 	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2025 	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2026 	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2027 	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2028 	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2029 	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2030 
2031 	return 0;
2032 }
2033 
2034 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2035 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2036 {
2037 	struct hclge_vport_vtag_tx_cfg_cmd *req;
2038 	struct hclge_desc desc;
2039 	u16 bmap_index;
2040 	u8 tx_cfg;
2041 	int ret;
2042 
2043 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2044 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2045 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2046 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2047 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2048 
2049 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2050 	if (ret) {
2051 		dev_err(&hdev->pdev->dev,
2052 			"failed to get vport%u txvlan cfg, ret = %d\n",
2053 			vf_id, ret);
2054 		return ret;
2055 	}
2056 
2057 	tx_cfg = req->vport_vlan_cfg;
2058 	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2059 
2060 	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2061 	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2062 	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2063 	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2064 	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2065 	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2066 	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2067 
2068 	return 0;
2069 }
2070 
2071 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2072 					    u8 vlan_type, u8 vf_id,
2073 					    struct hclge_desc *desc)
2074 {
2075 	struct hclge_vlan_filter_ctrl_cmd *req;
2076 	int ret;
2077 
2078 	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2079 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2080 	req->vlan_type = vlan_type;
2081 	req->vf_id = vf_id;
2082 
2083 	ret = hclge_cmd_send(&hdev->hw, desc, 1);
2084 	if (ret)
2085 		dev_err(&hdev->pdev->dev,
2086 			"failed to get vport%u vlan filter config, ret = %d.\n",
2087 			vf_id, ret);
2088 
2089 	return ret;
2090 }
2091 
2092 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2093 				       u8 vf_id, u8 *vlan_fe)
2094 {
2095 	struct hclge_vlan_filter_ctrl_cmd *req;
2096 	struct hclge_desc desc;
2097 	int ret;
2098 
2099 	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2100 	if (ret)
2101 		return ret;
2102 
2103 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2104 	*vlan_fe = req->vlan_fe;
2105 
2106 	return 0;
2107 }
2108 
2109 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2110 						   u8 vf_id, u8 *bypass_en)
2111 {
2112 	struct hclge_port_vlan_filter_bypass_cmd *req;
2113 	struct hclge_desc desc;
2114 	int ret;
2115 
2116 	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2117 		return 0;
2118 
2119 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2120 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2121 	req->vf_id = vf_id;
2122 
2123 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2124 	if (ret) {
2125 		dev_err(&hdev->pdev->dev,
2126 			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2127 			vf_id, ret);
2128 		return ret;
2129 	}
2130 
2131 	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2132 
2133 	return 0;
2134 }
2135 
2136 static const struct hclge_dbg_item vlan_filter_items[] = {
2137 	{ "FUNC_ID", 2 },
2138 	{ "I_VF_VLAN_FILTER", 2 },
2139 	{ "E_VF_VLAN_FILTER", 2 },
2140 	{ "PORT_VLAN_FILTER_BYPASS", 0 }
2141 };
2142 
2143 static const struct hclge_dbg_item vlan_offload_items[] = {
2144 	{ "FUNC_ID", 2 },
2145 	{ "PVID", 4 },
2146 	{ "ACCEPT_TAG1", 2 },
2147 	{ "ACCEPT_TAG2", 2 },
2148 	{ "ACCEPT_UNTAG1", 2 },
2149 	{ "ACCEPT_UNTAG2", 2 },
2150 	{ "INSERT_TAG1", 2 },
2151 	{ "INSERT_TAG2", 2 },
2152 	{ "SHIFT_TAG", 2 },
2153 	{ "STRIP_TAG1", 2 },
2154 	{ "STRIP_TAG2", 2 },
2155 	{ "DROP_TAG1", 2 },
2156 	{ "DROP_TAG2", 2 },
2157 	{ "PRI_ONLY_TAG1", 2 },
2158 	{ "PRI_ONLY_TAG2", 0 }
2159 };
2160 
2161 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2162 					     int len, int *pos)
2163 {
2164 	char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2165 	const char *result[ARRAY_SIZE(vlan_filter_items)];
2166 	u8 i, j, vlan_fe, bypass, ingress, egress;
2167 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2168 	int ret;
2169 
2170 	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2171 					  &vlan_fe);
2172 	if (ret)
2173 		return ret;
2174 	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2175 	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2176 
2177 	*pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
2178 			  state_str[ingress]);
2179 	*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2180 			  state_str[egress]);
2181 
2182 	hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2183 			       NULL, ARRAY_SIZE(vlan_filter_items));
2184 	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2185 
2186 	for (i = 0; i < func_num; i++) {
2187 		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2188 						  &vlan_fe);
2189 		if (ret)
2190 			return ret;
2191 
2192 		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2193 		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2194 		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2195 		if (ret)
2196 			return ret;
2197 		j = 0;
2198 		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2199 		result[j++] = state_str[ingress];
2200 		result[j++] = state_str[egress];
2201 		result[j++] =
2202 			test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2203 				 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2204 		hclge_dbg_fill_content(content, sizeof(content),
2205 				       vlan_filter_items, result,
2206 				       ARRAY_SIZE(vlan_filter_items));
2207 		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2208 	}
2209 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
2210 
2211 	return 0;
2212 }
2213 
2214 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2215 					      int len, int *pos)
2216 {
2217 	char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2218 	const char *result[ARRAY_SIZE(vlan_offload_items)];
2219 	char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2220 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2221 	struct hclge_dbg_vlan_cfg vlan_cfg;
2222 	int ret;
2223 	u8 i, j;
2224 
2225 	hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2226 			       NULL, ARRAY_SIZE(vlan_offload_items));
2227 	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2228 
2229 	for (i = 0; i < func_num; i++) {
2230 		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2231 		if (ret)
2232 			return ret;
2233 
2234 		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2235 		if (ret)
2236 			return ret;
2237 
2238 		sprintf(str_pvid, "%u", vlan_cfg.pvid);
2239 		j = 0;
2240 		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2241 		result[j++] = str_pvid;
2242 		result[j++] = state_str[vlan_cfg.accept_tag1];
2243 		result[j++] = state_str[vlan_cfg.accept_tag2];
2244 		result[j++] = state_str[vlan_cfg.accept_untag1];
2245 		result[j++] = state_str[vlan_cfg.accept_untag2];
2246 		result[j++] = state_str[vlan_cfg.insert_tag1];
2247 		result[j++] = state_str[vlan_cfg.insert_tag2];
2248 		result[j++] = state_str[vlan_cfg.shift_tag];
2249 		result[j++] = state_str[vlan_cfg.strip_tag1];
2250 		result[j++] = state_str[vlan_cfg.strip_tag2];
2251 		result[j++] = state_str[vlan_cfg.drop_tag1];
2252 		result[j++] = state_str[vlan_cfg.drop_tag2];
2253 		result[j++] = state_str[vlan_cfg.pri_only1];
2254 		result[j++] = state_str[vlan_cfg.pri_only2];
2255 
2256 		hclge_dbg_fill_content(content, sizeof(content),
2257 				       vlan_offload_items, result,
2258 				       ARRAY_SIZE(vlan_offload_items));
2259 		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2260 	}
2261 
2262 	return 0;
2263 }
2264 
2265 static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
2266 				      int len)
2267 {
2268 	int pos = 0;
2269 	int ret;
2270 
2271 	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
2272 	if (ret)
2273 		return ret;
2274 
2275 	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
2276 }
2277 
2278 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2279 {
2280 	struct hclge_ptp *ptp = hdev->ptp;
2281 	u32 sw_cfg = ptp->ptp_cfg;
2282 	unsigned int tx_start;
2283 	unsigned int last_rx;
2284 	int pos = 0;
2285 	u32 hw_cfg;
2286 	int ret;
2287 
2288 	pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2289 			 ptp->info.name);
2290 	pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2291 			 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2292 			 "yes" : "no");
2293 	pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2294 			 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2295 			 "yes" : "no");
2296 	pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2297 			 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2298 			 "yes" : "no");
2299 
2300 	last_rx = jiffies_to_msecs(ptp->last_rx);
2301 	pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
2302 			 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2303 	pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2304 
2305 	tx_start = jiffies_to_msecs(ptp->tx_start);
2306 	pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
2307 			 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2308 	pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2309 	pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2310 			 ptp->tx_skipped);
2311 	pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2312 			 ptp->tx_timeout);
2313 	pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2314 			 ptp->last_tx_seqid);
2315 
2316 	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2317 	if (ret)
2318 		return ret;
2319 
2320 	pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2321 			 sw_cfg, hw_cfg);
2322 
2323 	pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2324 			 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2325 
2326 	return 0;
2327 }
2328 
2329 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2330 {
2331 	hclge_dbg_dump_mac_list(hdev, buf, len, true);
2332 
2333 	return 0;
2334 }
2335 
2336 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2337 {
2338 	hclge_dbg_dump_mac_list(hdev, buf, len, false);
2339 
2340 	return 0;
2341 }
2342 
2343 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2344 	{
2345 		.cmd = HNAE3_DBG_CMD_TM_NODES,
2346 		.dbg_dump = hclge_dbg_dump_tm_nodes,
2347 	},
2348 	{
2349 		.cmd = HNAE3_DBG_CMD_TM_PRI,
2350 		.dbg_dump = hclge_dbg_dump_tm_pri,
2351 	},
2352 	{
2353 		.cmd = HNAE3_DBG_CMD_TM_QSET,
2354 		.dbg_dump = hclge_dbg_dump_tm_qset,
2355 	},
2356 	{
2357 		.cmd = HNAE3_DBG_CMD_TM_MAP,
2358 		.dbg_dump = hclge_dbg_dump_tm_map,
2359 	},
2360 	{
2361 		.cmd = HNAE3_DBG_CMD_TM_PG,
2362 		.dbg_dump = hclge_dbg_dump_tm_pg,
2363 	},
2364 	{
2365 		.cmd = HNAE3_DBG_CMD_TM_PORT,
2366 		.dbg_dump = hclge_dbg_dump_tm_port,
2367 	},
2368 	{
2369 		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2370 		.dbg_dump = hclge_dbg_dump_tc,
2371 	},
2372 	{
2373 		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2374 		.dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2375 	},
2376 	{
2377 		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2378 		.dbg_dump = hclge_dbg_dump_qos_pri_map,
2379 	},
2380 	{
2381 		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2382 		.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2383 	},
2384 	{
2385 		.cmd = HNAE3_DBG_CMD_MAC_UC,
2386 		.dbg_dump = hclge_dbg_dump_mac_uc,
2387 	},
2388 	{
2389 		.cmd = HNAE3_DBG_CMD_MAC_MC,
2390 		.dbg_dump = hclge_dbg_dump_mac_mc,
2391 	},
2392 	{
2393 		.cmd = HNAE3_DBG_CMD_MNG_TBL,
2394 		.dbg_dump = hclge_dbg_dump_mng_table,
2395 	},
2396 	{
2397 		.cmd = HNAE3_DBG_CMD_LOOPBACK,
2398 		.dbg_dump = hclge_dbg_dump_loopback,
2399 	},
2400 	{
2401 		.cmd = HNAE3_DBG_CMD_PTP_INFO,
2402 		.dbg_dump = hclge_dbg_dump_ptp_info,
2403 	},
2404 	{
2405 		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2406 		.dbg_dump = hclge_dbg_dump_interrupt,
2407 	},
2408 	{
2409 		.cmd = HNAE3_DBG_CMD_RESET_INFO,
2410 		.dbg_dump = hclge_dbg_dump_rst_info,
2411 	},
2412 	{
2413 		.cmd = HNAE3_DBG_CMD_IMP_INFO,
2414 		.dbg_dump = hclge_dbg_get_imp_stats_info,
2415 	},
2416 	{
2417 		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2418 		.dbg_dump = hclge_dbg_dump_ncl_config,
2419 	},
2420 	{
2421 		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2422 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2423 	},
2424 	{
2425 		.cmd = HNAE3_DBG_CMD_REG_SSU,
2426 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2427 	},
2428 	{
2429 		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2430 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2431 	},
2432 	{
2433 		.cmd = HNAE3_DBG_CMD_REG_RPU,
2434 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2435 	},
2436 	{
2437 		.cmd = HNAE3_DBG_CMD_REG_NCSI,
2438 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2439 	},
2440 	{
2441 		.cmd = HNAE3_DBG_CMD_REG_RTC,
2442 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2443 	},
2444 	{
2445 		.cmd = HNAE3_DBG_CMD_REG_PPP,
2446 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2447 	},
2448 	{
2449 		.cmd = HNAE3_DBG_CMD_REG_RCB,
2450 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2451 	},
2452 	{
2453 		.cmd = HNAE3_DBG_CMD_REG_TQP,
2454 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2455 	},
2456 	{
2457 		.cmd = HNAE3_DBG_CMD_REG_MAC,
2458 		.dbg_dump = hclge_dbg_dump_mac,
2459 	},
2460 	{
2461 		.cmd = HNAE3_DBG_CMD_REG_DCB,
2462 		.dbg_dump = hclge_dbg_dump_dcb,
2463 	},
2464 	{
2465 		.cmd = HNAE3_DBG_CMD_FD_TCAM,
2466 		.dbg_dump = hclge_dbg_dump_fd_tcam,
2467 	},
2468 	{
2469 		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2470 		.dbg_dump = hclge_dbg_dump_mac_tnl_status,
2471 	},
2472 	{
2473 		.cmd = HNAE3_DBG_CMD_SERV_INFO,
2474 		.dbg_dump = hclge_dbg_dump_serv_info,
2475 	},
2476 	{
2477 		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2478 		.dbg_dump = hclge_dbg_dump_vlan_config,
2479 	},
2480 	{
2481 		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
2482 		.dbg_dump = hclge_dbg_dump_fd_counter,
2483 	},
2484 	{
2485 		.cmd = HNAE3_DBG_CMD_UMV_INFO,
2486 		.dbg_dump = hclge_dbg_dump_umv_info,
2487 	},
2488 };
2489 
2490 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2491 		       char *buf, int len)
2492 {
2493 	struct hclge_vport *vport = hclge_get_vport(handle);
2494 	const struct hclge_dbg_func *cmd_func;
2495 	struct hclge_dev *hdev = vport->back;
2496 	u32 i;
2497 
2498 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2499 		if (cmd == hclge_dbg_cmd_func[i].cmd) {
2500 			cmd_func = &hclge_dbg_cmd_func[i];
2501 			if (cmd_func->dbg_dump)
2502 				return cmd_func->dbg_dump(hdev, buf, len);
2503 			else
2504 				return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2505 							      len);
2506 		}
2507 	}
2508 
2509 	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2510 	return -EINVAL;
2511 }
2512