1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3 
4 #include <linux/device.h>
5 #include <linux/sched/clock.h>
6 
7 #include "hclge_debugfs.h"
8 #include "hclge_err.h"
9 #include "hclge_main.h"
10 #include "hclge_tm.h"
11 #include "hnae3.h"
12 
13 static const char * const state_str[] = { "off", "on" };
14 static const char * const hclge_mac_state_str[] = {
15 	"TO_ADD", "TO_DEL", "ACTIVE"
16 };
17 
18 static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
19 
20 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
21 	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
22 	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
23 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
24 		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
25 		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
26 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
27 	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
28 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
29 		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
30 		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
31 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
32 	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
33 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
34 		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
35 		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
36 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
37 	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
38 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
39 		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
40 		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
41 	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
42 	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
43 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
44 		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
45 		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
46 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
47 	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
48 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
49 		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
50 		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
51 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
52 	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
53 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
54 		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
55 		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
56 	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
57 	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
58 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
59 		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
60 		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
61 	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
62 	  .dfx_msg = &hclge_dbg_rtc_reg[0],
63 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
64 		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
65 		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
66 	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
67 	  .dfx_msg = &hclge_dbg_ppp_reg[0],
68 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
69 		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
70 		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
71 	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
72 	  .dfx_msg = &hclge_dbg_rcb_reg[0],
73 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
74 		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
75 		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
76 	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
77 	  .dfx_msg = &hclge_dbg_tqp_reg[0],
78 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
79 		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
80 		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
81 };
82 
/* Make sure len(name) + interval >= maxlen(item data) + 2. For example, if
 * name = "pkt_num" (len 7) and the item data is a u32 printed with "%u"
 * (max len 10), the interval must be at least 5.
 */
87 static void hclge_dbg_fill_content(char *content, u16 len,
88 				   const struct hclge_dbg_item *items,
89 				   const char **result, u16 size)
90 {
91 #define HCLGE_DBG_LINE_END_LEN	2
92 	char *pos = content;
93 	u16 item_len;
94 	u16 i;
95 
96 	if (!len) {
97 		return;
98 	} else if (len <= HCLGE_DBG_LINE_END_LEN) {
99 		*pos++ = '\0';
100 		return;
101 	}
102 
103 	memset(content, ' ', len);
104 	len -= HCLGE_DBG_LINE_END_LEN;
105 
106 	for (i = 0; i < size; i++) {
107 		item_len = strlen(items[i].name) + items[i].interval;
108 		if (len < item_len)
109 			break;
110 
		if (result) {
			if (item_len < strlen(result[i]))
				break;
			/* copy without a trailing NUL so the padding spaces
			 * between columns are preserved
			 */
			memcpy(pos, result[i], strlen(result[i]));
		} else {
			memcpy(pos, items[i].name, strlen(items[i].name));
		}
118 		pos += item_len;
119 		len -= item_len;
120 	}
121 	*pos++ = '\n';
122 	*pos++ = '\0';
123 }
124 
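/* Format a function id for display: id 0 is the PF, id N maps to "vf<N-1>". */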
125 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
126 {
127 	if (id)
128 		sprintf(buf, "vf%u", id - 1U);
129 	else
130 		sprintf(buf, "pf");
131 
132 	return buf;
133 }
134 
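/* Query how many command descriptors are needed to read the DFX register
 * block identified by @offset; fails if the firmware reports zero.
 */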
135 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
136 				    u32 *bd_num)
137 {
138 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
139 	int entries_per_desc;
140 	int index;
141 	int ret;
142 
143 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
144 	if (ret) {
145 		dev_err(&hdev->pdev->dev,
146 			"failed to get dfx bd_num, offset = %d, ret = %d\n",
147 			offset, ret);
148 		return ret;
149 	}
150 
151 	entries_per_desc = ARRAY_SIZE(desc[0].data);
152 	index = offset % entries_per_desc;
153 
154 	*bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
155 	if (!(*bd_num)) {
156 		dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
157 		return -EINVAL;
158 	}
159 
160 	return 0;
161 }
162 
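/* Read @bd_num descriptors for @cmd: descriptor 0 carries @index and all
 * descriptors except the last are chained with the NEXT flag.
 */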
163 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
164 			      struct hclge_desc *desc_src,
165 			      int index, int bd_num,
166 			      enum hclge_opcode_type cmd)
167 {
168 	struct hclge_desc *desc = desc_src;
169 	int ret, i;
170 
171 	hclge_cmd_setup_basic_desc(desc, cmd, true);
172 	desc->data[0] = cpu_to_le32(index);
173 
174 	for (i = 1; i < bd_num; i++) {
175 		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
176 		desc++;
177 		hclge_cmd_setup_basic_desc(desc, cmd, true);
178 	}
179 
180 	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
181 	if (ret)
182 		dev_err(&hdev->pdev->dev,
183 			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
184 	return ret;
185 }
186 
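/* Dump the per-TQP DFX registers: print the item legend first, then one row
 * of register values for each TQP of vport 0.
 */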
187 static int
188 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
189 		       const struct hclge_dbg_reg_type_info *reg_info,
190 		       char *buf, int len, int *pos)
191 {
192 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
193 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
194 	struct hclge_desc *desc_src;
	u32 index, entry, i, cnt, bd_num;
	int min_num, ret;
197 	struct hclge_desc *desc;
198 
199 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
200 	if (ret)
201 		return ret;
202 
203 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
204 	if (!desc_src)
205 		return -ENOMEM;
206 
207 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
208 
209 	for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
210 		*pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
211 				  cnt++, dfx_message->message);
212 
213 	for (i = 0; i < cnt; i++)
214 		*pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
215 
216 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
217 
218 	for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
219 		dfx_message = reg_info->dfx_msg;
220 		desc = desc_src;
221 		ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
222 					 reg_msg->cmd);
223 		if (ret)
224 			break;
225 
226 		for (i = 0; i < min_num; i++, dfx_message++) {
227 			entry = i % HCLGE_DESC_DATA_LEN;
228 			if (i > 0 && !entry)
229 				desc++;
230 
231 			*pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
232 					  le32_to_cpu(desc->data[entry]));
233 		}
234 		*pos += scnprintf(buf + *pos, len - *pos, "\n");
235 	}
236 
237 	kfree(desc_src);
238 	return ret;
239 }
240 
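/* Dump a common (non-TQP) DFX register block as "name: value" lines;
 * entries whose dfx_message flag is clear are skipped.
 */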
241 static int
242 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
243 			  const struct hclge_dbg_reg_type_info *reg_info,
244 			  char *buf, int len, int *pos)
245 {
246 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
247 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
248 	struct hclge_desc *desc_src;
	int min_num, ret;
	struct hclge_desc *desc;
	u32 entry, i, bd_num;
252 
253 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
254 	if (ret)
255 		return ret;
256 
257 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
258 	if (!desc_src)
259 		return -ENOMEM;
260 
261 	desc = desc_src;
262 
263 	ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
264 	if (ret) {
		kfree(desc_src);
266 		return ret;
267 	}
268 
269 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
270 
271 	for (i = 0; i < min_num; i++, dfx_message++) {
272 		entry = i % HCLGE_DESC_DATA_LEN;
273 		if (i > 0 && !entry)
274 			desc++;
275 		if (!dfx_message->flag)
276 			continue;
277 
278 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
279 				  dfx_message->message,
280 				  le32_to_cpu(desc->data[entry]));
281 	}
282 
283 	kfree(desc_src);
284 	return 0;
285 }
286 
287 static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
288 	{HCLGE_MAC_TX_EN_B,  "mac_trans_en"},
289 	{HCLGE_MAC_RX_EN_B,  "mac_rcv_en"},
290 	{HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
291 	{HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
292 	{HCLGE_MAC_1588_TX_B, "1588_trans_en"},
293 	{HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
294 	{HCLGE_MAC_APP_LP_B,  "mac_app_loop_en"},
295 	{HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
296 	{HCLGE_MAC_FCS_TX_B,  "mac_fcs_tx_en"},
297 	{HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
298 	{HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
299 	{HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
300 	{HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
301 	{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
302 };
303 
static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
					    int len, int *pos)
306 {
307 	struct hclge_config_mac_mode_cmd *req;
308 	struct hclge_desc desc;
309 	u32 loop_en, i, offset;
310 	int ret;
311 
312 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
313 
314 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
315 	if (ret) {
316 		dev_err(&hdev->pdev->dev,
317 			"failed to dump mac enable status, ret = %d\n", ret);
318 		return ret;
319 	}
320 
321 	req = (struct hclge_config_mac_mode_cmd *)desc.data;
322 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
323 
324 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
325 		offset = hclge_dbg_mac_en_status[i].offset;
326 		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
327 				  hclge_dbg_mac_en_status[i].message,
328 				  hnae3_get_bit(loop_en, offset));
329 	}
330 
331 	return 0;
332 }
333 
334 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
335 					 int len, int *pos)
336 {
337 	struct hclge_config_max_frm_size_cmd *req;
338 	struct hclge_desc desc;
339 	int ret;
340 
341 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
342 
343 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
344 	if (ret) {
345 		dev_err(&hdev->pdev->dev,
346 			"failed to dump mac frame size, ret = %d\n", ret);
347 		return ret;
348 	}
349 
350 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
351 
352 	*pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
353 			  le16_to_cpu(req->max_frm_size));
354 	*pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
355 			  req->min_frm_size);
356 
357 	return 0;
358 }
359 
360 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
361 					   int len, int *pos)
362 {
363 #define HCLGE_MAC_SPEED_SHIFT	0
364 #define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
365 #define HCLGE_MAC_DUPLEX_SHIFT	7
366 
367 	struct hclge_config_mac_speed_dup_cmd *req;
368 	struct hclge_desc desc;
369 	int ret;
370 
371 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
372 
373 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
374 	if (ret) {
375 		dev_err(&hdev->pdev->dev,
376 			"failed to dump mac speed duplex, ret = %d\n", ret);
377 		return ret;
378 	}
379 
380 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
381 
382 	*pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
383 			  hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
384 					  HCLGE_MAC_SPEED_SHIFT));
385 	*pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
386 			  hnae3_get_bit(req->speed_dup,
387 					HCLGE_MAC_DUPLEX_SHIFT));
388 	return 0;
389 }
390 
391 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
392 {
393 	int pos = 0;
394 	int ret;
395 
396 	ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
397 	if (ret)
398 		return ret;
399 
400 	ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
401 	if (ret)
402 		return ret;
403 
404 	return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
405 }
406 
407 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
408 				   int *pos)
409 {
410 	struct hclge_dbg_bitmap_cmd req;
411 	struct hclge_desc desc;
412 	u16 qset_id, qset_num;
413 	int ret;
414 
415 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
416 	if (ret)
417 		return ret;
418 
419 	*pos += scnprintf(buf + *pos, len - *pos,
420 			  "qset_id  roce_qset_mask  nic_qset_mask  qset_shaping_pass  qset_bp_status\n");
421 	for (qset_id = 0; qset_id < qset_num; qset_id++) {
422 		ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
423 					 HCLGE_OPC_QSET_DFX_STS);
424 		if (ret)
425 			return ret;
426 
427 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
428 
429 		*pos += scnprintf(buf + *pos, len - *pos,
430 				  "%04u           %#x            %#x             %#x               %#x\n",
431 				  qset_id, req.bit0, req.bit1, req.bit2,
432 				  req.bit3);
433 	}
434 
435 	return 0;
436 }
437 
438 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
439 				  int *pos)
440 {
441 	struct hclge_dbg_bitmap_cmd req;
442 	struct hclge_desc desc;
443 	u8 pri_id, pri_num;
444 	int ret;
445 
446 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
447 	if (ret)
448 		return ret;
449 
450 	*pos += scnprintf(buf + *pos, len - *pos,
451 			  "pri_id  pri_mask  pri_cshaping_pass  pri_pshaping_pass\n");
452 	for (pri_id = 0; pri_id < pri_num; pri_id++) {
453 		ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
454 					 HCLGE_OPC_PRI_DFX_STS);
455 		if (ret)
456 			return ret;
457 
458 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
459 
460 		*pos += scnprintf(buf + *pos, len - *pos,
461 				  "%03u       %#x           %#x                %#x\n",
462 				  pri_id, req.bit0, req.bit1, req.bit2);
463 	}
464 
465 	return 0;
466 }
467 
468 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
469 				 int *pos)
470 {
471 	struct hclge_dbg_bitmap_cmd req;
472 	struct hclge_desc desc;
473 	u8 pg_id;
474 	int ret;
475 
476 	*pos += scnprintf(buf + *pos, len - *pos,
477 			  "pg_id  pg_mask  pg_cshaping_pass  pg_pshaping_pass\n");
478 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
479 		ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
480 					 HCLGE_OPC_PG_DFX_STS);
481 		if (ret)
482 			return ret;
483 
484 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
485 
486 		*pos += scnprintf(buf + *pos, len - *pos,
487 				  "%03u      %#x           %#x               %#x\n",
488 				  pg_id, req.bit0, req.bit1, req.bit2);
489 	}
490 
491 	return 0;
492 }
493 
494 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
495 				    int *pos)
496 {
497 	struct hclge_desc desc;
498 	u16 nq_id;
499 	int ret;
500 
501 	*pos += scnprintf(buf + *pos, len - *pos,
502 			  "nq_id  sch_nic_queue_cnt  sch_roce_queue_cnt\n");
503 	for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
504 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
505 					 HCLGE_OPC_SCH_NQ_CNT);
506 		if (ret)
507 			return ret;
508 
509 		*pos += scnprintf(buf + *pos, len - *pos, "%04u           %#x",
510 				  nq_id, le32_to_cpu(desc.data[1]));
511 
512 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
513 					 HCLGE_OPC_SCH_RQ_CNT);
514 		if (ret)
515 			return ret;
516 
517 		*pos += scnprintf(buf + *pos, len - *pos,
518 				  "               %#x\n",
519 				  le32_to_cpu(desc.data[1]));
520 	}
521 
522 	return 0;
523 }
524 
525 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
526 				   int *pos)
527 {
528 	struct hclge_dbg_bitmap_cmd req;
529 	struct hclge_desc desc;
530 	u8 port_id = 0;
531 	int ret;
532 
533 	ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
534 				 HCLGE_OPC_PORT_DFX_STS);
535 	if (ret)
536 		return ret;
537 
538 	req.bitmap = (u8)le32_to_cpu(desc.data[1]);
539 
540 	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
541 			 req.bit0);
542 	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
543 			 req.bit1);
544 
545 	return 0;
546 }
547 
548 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
549 				 int *pos)
550 {
551 	struct hclge_desc desc[2];
552 	u8 port_id = 0;
553 	int ret;
554 
555 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
556 				 HCLGE_OPC_TM_INTERNAL_CNT);
557 	if (ret)
558 		return ret;
559 
560 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
561 			  le32_to_cpu(desc[0].data[1]));
562 	*pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
563 			  le32_to_cpu(desc[0].data[2]));
564 
565 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
566 				 HCLGE_OPC_TM_INTERNAL_STS);
567 	if (ret)
568 		return ret;
569 
570 	*pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
571 			  le32_to_cpu(desc[0].data[1]));
572 	*pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
573 			  le32_to_cpu(desc[0].data[2]));
574 	*pos += scnprintf(buf + *pos, len - *pos,
575 			  "sch_roce_fifo_afull_gap: %#x\n",
576 			  le32_to_cpu(desc[0].data[3]));
577 	*pos += scnprintf(buf + *pos, len - *pos,
578 			  "tx_private_waterline: %#x\n",
579 			  le32_to_cpu(desc[0].data[4]));
580 	*pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
581 			  le32_to_cpu(desc[0].data[5]));
582 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
583 			  le32_to_cpu(desc[1].data[0]));
584 	*pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
585 			  le32_to_cpu(desc[1].data[1]));
586 
587 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
588 		return 0;
589 
590 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
591 				 HCLGE_OPC_TM_INTERNAL_STS_1);
592 	if (ret)
593 		return ret;
594 
595 	*pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
596 			  le32_to_cpu(desc[0].data[1]));
597 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
598 			  le32_to_cpu(desc[0].data[2]));
599 	*pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
600 			  le32_to_cpu(desc[0].data[3]));
601 	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
602 			  le32_to_cpu(desc[0].data[4]));
603 	*pos += scnprintf(buf + *pos, len - *pos,
604 			  "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
605 			  le32_to_cpu(desc[0].data[5]));
606 
607 	return 0;
608 }
609 
610 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
611 {
612 	int pos = 0;
613 	int ret;
614 
615 	ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
616 	if (ret)
617 		return ret;
618 
619 	ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
620 	if (ret)
621 		return ret;
622 
623 	ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
624 	if (ret)
625 		return ret;
626 
627 	ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
628 	if (ret)
629 		return ret;
630 
631 	ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
632 	if (ret)
633 		return ret;
634 
635 	return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
636 }
637 
638 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
639 				  enum hnae3_dbg_cmd cmd, char *buf, int len)
640 {
641 	const struct hclge_dbg_reg_type_info *reg_info;
642 	int pos = 0, ret = 0;
643 	int i;
644 
645 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
646 		reg_info = &hclge_dbg_reg_info[i];
647 		if (cmd == reg_info->cmd) {
648 			if (cmd == HNAE3_DBG_CMD_REG_TQP)
649 				return hclge_dbg_dump_reg_tqp(hdev, reg_info,
650 							      buf, len, &pos);
651 
652 			ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
653 							len, &pos);
654 			if (ret)
655 				break;
656 		}
657 	}
658 
659 	return ret;
660 }
661 
662 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
663 {
664 	struct hclge_ets_tc_weight_cmd *ets_weight;
665 	struct hclge_desc desc;
666 	char *sch_mode_str;
667 	int pos = 0;
668 	int ret;
669 	u8 i;
670 
671 	if (!hnae3_dev_dcb_supported(hdev)) {
672 		dev_err(&hdev->pdev->dev,
673 			"Only DCB-supported dev supports tc\n");
674 		return -EOPNOTSUPP;
675 	}
676 
677 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
678 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
679 	if (ret) {
680 		dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
681 			ret);
682 		return ret;
683 	}
684 
685 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
686 
687 	pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
688 			 hdev->tm_info.num_tc);
689 	pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
690 			 ets_weight->weight_offset);
691 
692 	pos += scnprintf(buf + pos, len - pos, "TC    MODE  WEIGHT\n");
693 	for (i = 0; i < HNAE3_MAX_TC; i++) {
694 		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
695 		pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
696 				 i, sch_mode_str, ets_weight->tc_weight[i]);
697 	}
698 
699 	return 0;
700 }
701 
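/* Column headers for the tm_pg dump; the second field of each item is the
 * extra width hclge_dbg_fill_content() reserves beyond the name for the
 * column.
 */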
702 static const struct hclge_dbg_item tm_pg_items[] = {
703 	{ "ID", 2 },
704 	{ "PRI_MAP", 2 },
705 	{ "MODE", 2 },
706 	{ "DWRR", 2 },
707 	{ "C_IR_B", 2 },
708 	{ "C_IR_U", 2 },
709 	{ "C_IR_S", 2 },
710 	{ "C_BS_B", 2 },
711 	{ "C_BS_S", 2 },
712 	{ "C_FLAG", 2 },
713 	{ "C_RATE(Mbps)", 2 },
714 	{ "P_IR_B", 2 },
715 	{ "P_IR_U", 2 },
716 	{ "P_IR_S", 2 },
717 	{ "P_BS_B", 2 },
718 	{ "P_BS_S", 2 },
719 	{ "P_FLAG", 2 },
720 	{ "P_RATE(Mbps)", 0 }
721 };
722 
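/* Fill the seven shaper columns (IR_B, IR_U, IR_S, BS_B, BS_S, FLAG, RATE)
 * starting at result[*index], advancing *index past them.
 */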
723 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
724 					  char **result, u8 *index)
725 {
726 	sprintf(result[(*index)++], "%3u", para->ir_b);
727 	sprintf(result[(*index)++], "%3u", para->ir_u);
728 	sprintf(result[(*index)++], "%3u", para->ir_s);
729 	sprintf(result[(*index)++], "%3u", para->bs_b);
730 	sprintf(result[(*index)++], "%3u", para->bs_s);
731 	sprintf(result[(*index)++], "%3u", para->flag);
732 	sprintf(result[(*index)++], "%6u", para->rate);
733 }
734 
735 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
736 				  char *buf, int len)
737 {
738 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
739 	char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
740 	u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
741 	char content[HCLGE_DBG_TM_INFO_LEN];
742 	int pos = 0;
743 	int ret;
744 
745 	for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
746 		result[i] = data_str;
747 		data_str += HCLGE_DBG_DATA_STR_LEN;
748 	}
749 
750 	hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
751 			       NULL, ARRAY_SIZE(tm_pg_items));
752 	pos += scnprintf(buf + pos, len - pos, "%s", content);
753 
754 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
755 		ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
756 		if (ret)
757 			return ret;
758 
759 		ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
760 		if (ret)
761 			return ret;
762 
763 		ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
764 		if (ret)
765 			return ret;
766 
767 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
768 					     HCLGE_OPC_TM_PG_C_SHAPPING,
769 					     &c_shaper_para);
770 		if (ret)
771 			return ret;
772 
773 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
774 					     HCLGE_OPC_TM_PG_P_SHAPPING,
775 					     &p_shaper_para);
776 		if (ret)
777 			return ret;
778 
779 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
780 				       "sp";
781 
782 		j = 0;
783 		sprintf(result[j++], "%02u", pg_id);
784 		sprintf(result[j++], "0x%02x", pri_bit_map);
785 		sprintf(result[j++], "%4s", sch_mode_str);
786 		sprintf(result[j++], "%3u", weight);
787 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
788 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
789 
790 		hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
791 				       (const char **)result,
792 				       ARRAY_SIZE(tm_pg_items));
793 		pos += scnprintf(buf + pos, len - pos, "%s", content);
794 	}
795 
796 	return 0;
797 }
798 
799 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
800 {
801 	char *data_str;
802 	int ret;
803 
804 	data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
805 			   HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
806 	if (!data_str)
807 		return -ENOMEM;
808 
809 	ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
810 
811 	kfree(data_str);
812 
813 	return ret;
814 }
815 
static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
817 {
818 	struct hclge_tm_shaper_para shaper_para;
819 	int pos = 0;
820 	int ret;
821 
822 	ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
823 	if (ret)
824 		return ret;
825 
826 	pos += scnprintf(buf + pos, len - pos,
827 			 "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
828 	pos += scnprintf(buf + pos, len - pos,
829 			 "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
830 			 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
831 			 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
832 			 shaper_para.rate);
833 
834 	return 0;
835 }
836 
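/* Dump the back-pressure to qset bitmap for @tc_id. Unlike most dump
 * helpers, this returns the number of bytes written so the caller can
 * advance its buffer position.
 */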
837 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
838 					 char *buf, int len)
839 {
840 	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
841 	struct hclge_bp_to_qs_map_cmd *map;
842 	struct hclge_desc desc;
843 	int pos = 0;
844 	u8 group_id;
845 	u8 grp_num;
846 	u16 i = 0;
847 	int ret;
848 
849 	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
850 		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
851 	map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
852 	for (group_id = 0; group_id < grp_num; group_id++) {
853 		hclge_cmd_setup_basic_desc(&desc,
854 					   HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
855 					   true);
856 		map->tc_id = tc_id;
857 		map->qs_group_id = group_id;
858 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859 		if (ret) {
860 			dev_err(&hdev->pdev->dev,
861 				"failed to get bp to qset map, ret = %d\n",
862 				ret);
863 			return ret;
864 		}
865 
866 		qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
867 	}
868 
869 	pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
870 	for (group_id = 0; group_id < grp_num / 8; group_id++) {
871 		pos += scnprintf(buf + pos, len - pos,
872 			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
873 			 group_id * 256, qset_mapping[i + 7],
874 			 qset_mapping[i + 6], qset_mapping[i + 5],
875 			 qset_mapping[i + 4], qset_mapping[i + 3],
876 			 qset_mapping[i + 2], qset_mapping[i + 1],
877 			 qset_mapping[i]);
878 		i += 8;
879 	}
880 
881 	return pos;
882 }
883 
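/* For each TQP, print its qset/priority/TC mapping, followed by the
 * BP-to-qset bitmap of that TC on DCB-capable devices.
 */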
884 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
885 {
886 	u16 queue_id;
887 	u16 qset_id;
888 	u8 link_vld;
889 	int pos = 0;
890 	u8 pri_id;
891 	u8 tc_id;
892 	int ret;
893 
894 	for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
895 		ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
896 		if (ret)
897 			return ret;
898 
899 		ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
900 						&link_vld);
901 		if (ret)
902 			return ret;
903 
904 		ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
905 		if (ret)
906 			return ret;
907 
908 		pos += scnprintf(buf + pos, len - pos,
909 				 "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
910 		pos += scnprintf(buf + pos, len - pos,
911 				 "%04u        %4u       %3u      %2u\n",
912 				 queue_id, qset_id, pri_id, tc_id);
913 
914 		if (!hnae3_dev_dcb_supported(hdev))
915 			continue;
916 
917 		ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
918 						    len - pos);
919 		if (ret < 0)
920 			return ret;
921 		pos += ret;
922 
923 		pos += scnprintf(buf + pos, len - pos, "\n");
924 	}
925 
926 	return 0;
927 }
928 
929 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
930 {
931 	struct hclge_tm_nodes_cmd *nodes;
932 	struct hclge_desc desc;
933 	int pos = 0;
934 	int ret;
935 
936 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
937 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
938 	if (ret) {
939 		dev_err(&hdev->pdev->dev,
940 			"failed to dump tm nodes, ret = %d\n", ret);
941 		return ret;
942 	}
943 
944 	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
945 
946 	pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
947 	pos += scnprintf(buf + pos, len - pos, "PG      %4u      %4u\n",
948 			 nodes->pg_base_id, nodes->pg_num);
949 	pos += scnprintf(buf + pos, len - pos, "PRI     %4u      %4u\n",
950 			 nodes->pri_base_id, nodes->pri_num);
951 	pos += scnprintf(buf + pos, len - pos, "QSET    %4u      %4u\n",
952 			 le16_to_cpu(nodes->qset_base_id),
953 			 le16_to_cpu(nodes->qset_num));
954 	pos += scnprintf(buf + pos, len - pos, "QUEUE   %4u      %4u\n",
955 			 le16_to_cpu(nodes->queue_base_id),
956 			 le16_to_cpu(nodes->queue_num));
957 
958 	return 0;
959 }
960 
961 static const struct hclge_dbg_item tm_pri_items[] = {
962 	{ "ID", 4 },
963 	{ "MODE", 2 },
964 	{ "DWRR", 2 },
965 	{ "C_IR_B", 2 },
966 	{ "C_IR_U", 2 },
967 	{ "C_IR_S", 2 },
968 	{ "C_BS_B", 2 },
969 	{ "C_BS_S", 2 },
970 	{ "C_FLAG", 2 },
971 	{ "C_RATE(Mbps)", 2 },
972 	{ "P_IR_B", 2 },
973 	{ "P_IR_U", 2 },
974 	{ "P_IR_S", 2 },
975 	{ "P_BS_B", 2 },
976 	{ "P_BS_S", 2 },
977 	{ "P_FLAG", 2 },
978 	{ "P_RATE(Mbps)", 0 }
979 };
980 
981 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
982 {
983 	char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
984 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
985 	char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
986 	char content[HCLGE_DBG_TM_INFO_LEN];
987 	u8 pri_num, sch_mode, weight, i, j;
988 	int pos, ret;
989 
990 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
991 	if (ret)
992 		return ret;
993 
994 	for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
995 		result[i] = &data_str[i][0];
996 
997 	hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
998 			       NULL, ARRAY_SIZE(tm_pri_items));
999 	pos = scnprintf(buf, len, "%s", content);
1000 
1001 	for (i = 0; i < pri_num; i++) {
1002 		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
1003 		if (ret)
1004 			return ret;
1005 
1006 		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
1007 		if (ret)
1008 			return ret;
1009 
1010 		ret = hclge_tm_get_pri_shaper(hdev, i,
1011 					      HCLGE_OPC_TM_PRI_C_SHAPPING,
1012 					      &c_shaper_para);
1013 		if (ret)
1014 			return ret;
1015 
1016 		ret = hclge_tm_get_pri_shaper(hdev, i,
1017 					      HCLGE_OPC_TM_PRI_P_SHAPPING,
1018 					      &p_shaper_para);
1019 		if (ret)
1020 			return ret;
1021 
1022 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1023 			       "sp";
1024 
1025 		j = 0;
1026 		sprintf(result[j++], "%04u", i);
1027 		sprintf(result[j++], "%4s", sch_mode_str);
1028 		sprintf(result[j++], "%3u", weight);
1029 		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1030 		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1031 		hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1032 				       (const char **)result,
1033 				       ARRAY_SIZE(tm_pri_items));
1034 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1035 	}
1036 
1037 	return 0;
1038 }
1039 
1040 static const struct hclge_dbg_item tm_qset_items[] = {
1041 	{ "ID", 4 },
1042 	{ "MAP_PRI", 2 },
1043 	{ "LINK_VLD", 2 },
1044 	{ "MODE", 2 },
1045 	{ "DWRR", 2 },
1046 	{ "IR_B", 2 },
1047 	{ "IR_U", 2 },
1048 	{ "IR_S", 2 },
1049 	{ "BS_B", 2 },
1050 	{ "BS_S", 2 },
1051 	{ "FLAG", 2 },
1052 	{ "RATE(Mbps)", 0 }
1053 };
1054 
1055 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1056 {
1057 	char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1058 	char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1059 	u8 priority, link_vld, sch_mode, weight;
1060 	struct hclge_tm_shaper_para shaper_para;
1061 	char content[HCLGE_DBG_TM_INFO_LEN];
1062 	u16 qset_num, i;
1063 	int ret, pos;
1064 	u8 j;
1065 
1066 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
1067 	if (ret)
1068 		return ret;
1069 
1070 	for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1071 		result[i] = &data_str[i][0];
1072 
1073 	hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1074 			       NULL, ARRAY_SIZE(tm_qset_items));
1075 	pos = scnprintf(buf, len, "%s", content);
1076 
1077 	for (i = 0; i < qset_num; i++) {
1078 		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1079 		if (ret)
1080 			return ret;
1081 
1082 		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1083 		if (ret)
1084 			return ret;
1085 
1086 		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1087 		if (ret)
1088 			return ret;
1089 
1090 		ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1091 		if (ret)
1092 			return ret;
1093 
1094 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1095 			       "sp";
1096 
1097 		j = 0;
1098 		sprintf(result[j++], "%04u", i);
1099 		sprintf(result[j++], "%4u", priority);
1100 		sprintf(result[j++], "%4u", link_vld);
1101 		sprintf(result[j++], "%4s", sch_mode_str);
1102 		sprintf(result[j++], "%3u", weight);
1103 		hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1104 
1105 		hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1106 				       (const char **)result,
1107 				       ARRAY_SIZE(tm_qset_items));
1108 		pos += scnprintf(buf + pos, len - pos, "%s", content);
1109 	}
1110 
1111 	return 0;
1112 }
1113 
1114 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1115 					int len)
1116 {
1117 	struct hclge_cfg_pause_param_cmd *pause_param;
1118 	struct hclge_desc desc;
1119 	int pos = 0;
1120 	int ret;
1121 
1122 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1123 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1124 	if (ret) {
1125 		dev_err(&hdev->pdev->dev,
1126 			"failed to dump qos pause, ret = %d\n", ret);
1127 		return ret;
1128 	}
1129 
1130 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1131 
1132 	pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1133 			 pause_param->pause_trans_gap);
1134 	pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1135 			 le16_to_cpu(pause_param->pause_trans_time));
1136 	return 0;
1137 }
1138 
1139 #define HCLGE_DBG_TC_MASK		0x0F
1140 
1141 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1142 				      int len)
1143 {
1144 #define HCLGE_DBG_TC_BIT_WIDTH		4
1145 
1146 	struct hclge_qos_pri_map_cmd *pri_map;
1147 	struct hclge_desc desc;
1148 	int pos = 0;
1149 	u8 *pri_tc;
1150 	u8 tc, i;
1151 	int ret;
1152 
1153 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1154 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1155 	if (ret) {
1156 		dev_err(&hdev->pdev->dev,
1157 			"failed to dump qos pri map, ret = %d\n", ret);
1158 		return ret;
1159 	}
1160 
1161 	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1162 
1163 	pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1164 			 pri_map->vlan_pri);
1165 	pos += scnprintf(buf + pos, len - pos, "PRI  TC\n");
1166 
1167 	pri_tc = (u8 *)pri_map;
1168 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1169 		tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1170 		tc &= HCLGE_DBG_TC_MASK;
1171 		pos += scnprintf(buf + pos, len - pos, "%u     %u\n", i, tc);
1172 	}
1173 
1174 	return 0;
1175 }
1176 
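/* Dump the dscp to priority/TC mapping. Only meaningful in DSCP map mode;
 * in PRIO mode only the mode string is printed.
 */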
1177 static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
1178 				       int len)
1179 {
1180 	struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
1181 	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
1182 	u8 *req0 = (u8 *)desc[0].data;
1183 	u8 *req1 = (u8 *)desc[1].data;
1184 	u8 dscp_tc[HNAE3_MAX_DSCP];
1185 	int pos, ret;
1186 	u8 i, j;
1187 
1188 	pos = scnprintf(buf, len, "tc map mode: %s\n",
1189 			tc_map_mode_str[kinfo->tc_map_mode]);
1190 
1191 	if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
1192 		return 0;
1193 
1194 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
1195 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1196 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
1197 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
1198 	if (ret) {
1199 		dev_err(&hdev->pdev->dev,
1200 			"failed to dump qos dscp map, ret = %d\n", ret);
1201 		return ret;
1202 	}
1203 
1204 	pos += scnprintf(buf + pos, len - pos, "\nDSCP  PRIO  TC\n");
1205 
	/* The low 32 dscp settings use bd0, the high 32 settings use bd1 */
1207 	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
1208 		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
		/* Each dscp setting takes 4 bits, so each byte stores two
		 * dscp settings
		 */
1212 		dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1213 		dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1214 		dscp_tc[i] &= HCLGE_DBG_TC_MASK;
1215 		dscp_tc[j] &= HCLGE_DBG_TC_MASK;
1216 	}
1217 
1218 	for (i = 0; i < HNAE3_MAX_DSCP; i++) {
1219 		if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
1220 			continue;
1221 
1222 		pos += scnprintf(buf + pos, len - pos, " %2u    %u    %u\n",
1223 				 i, kinfo->dscp_prio[i], dscp_tc[i]);
1224 	}
1225 
1226 	return 0;
1227 }
1228 
1229 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1230 {
1231 	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1232 	struct hclge_desc desc;
1233 	int pos = 0;
1234 	int i, ret;
1235 
1236 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1237 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1238 	if (ret) {
1239 		dev_err(&hdev->pdev->dev,
1240 			"failed to dump tx buf, ret = %d\n", ret);
1241 		return ret;
1242 	}
1243 
1244 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1245 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1246 		pos += scnprintf(buf + pos, len - pos,
1247 				 "tx_packet_buf_tc_%d: 0x%x\n", i,
1248 				 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1249 
1250 	return pos;
1251 }
1252 
1253 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1254 					  int len)
1255 {
1256 	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1257 	struct hclge_desc desc;
1258 	int pos = 0;
1259 	int i, ret;
1260 
1261 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1262 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1263 	if (ret) {
1264 		dev_err(&hdev->pdev->dev,
1265 			"failed to dump rx priv buf, ret = %d\n", ret);
1266 		return ret;
1267 	}
1268 
1269 	pos += scnprintf(buf + pos, len - pos, "\n");
1270 
1271 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1272 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1273 		pos += scnprintf(buf + pos, len - pos,
1274 				 "rx_packet_buf_tc_%d: 0x%x\n", i,
1275 				 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1276 
1277 	pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1278 			 le16_to_cpu(rx_buf_cmd->shared_buf));
1279 
1280 	return pos;
1281 }
1282 
1283 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1284 					   int len)
1285 {
1286 	struct hclge_rx_com_wl *rx_com_wl;
1287 	struct hclge_desc desc;
1288 	int pos = 0;
1289 	int ret;
1290 
1291 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1292 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1293 	if (ret) {
1294 		dev_err(&hdev->pdev->dev,
1295 			"failed to dump rx common wl, ret = %d\n", ret);
1296 		return ret;
1297 	}
1298 
1299 	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1300 	pos += scnprintf(buf + pos, len - pos, "\n");
1301 	pos += scnprintf(buf + pos, len - pos,
1302 			 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1303 			 le16_to_cpu(rx_com_wl->com_wl.high),
1304 			 le16_to_cpu(rx_com_wl->com_wl.low));
1305 
1306 	return pos;
1307 }
1308 
1309 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1310 					    int len)
1311 {
1312 	struct hclge_rx_com_wl *rx_packet_cnt;
1313 	struct hclge_desc desc;
1314 	int pos = 0;
1315 	int ret;
1316 
1317 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1318 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1319 	if (ret) {
1320 		dev_err(&hdev->pdev->dev,
1321 			"failed to dump rx global pkt cnt, ret = %d\n", ret);
1322 		return ret;
1323 	}
1324 
1325 	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1326 	pos += scnprintf(buf + pos, len - pos,
1327 			 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1328 			 le16_to_cpu(rx_packet_cnt->com_wl.high),
1329 			 le16_to_cpu(rx_packet_cnt->com_wl.low));
1330 
1331 	return pos;
1332 }
1333 
1334 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1335 					     int len)
1336 {
1337 	struct hclge_rx_priv_wl_buf *rx_priv_wl;
1338 	struct hclge_desc desc[2];
1339 	int pos = 0;
1340 	int i, ret;
1341 
1342 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1343 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1344 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1345 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1346 	if (ret) {
1347 		dev_err(&hdev->pdev->dev,
1348 			"failed to dump rx priv wl buf, ret = %d\n", ret);
1349 		return ret;
1350 	}
1351 
1352 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1353 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1354 		pos += scnprintf(buf + pos, len - pos,
1355 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1356 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1357 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1358 
1359 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1360 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1361 		pos += scnprintf(buf + pos, len - pos,
1362 			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1363 			 i + HCLGE_TC_NUM_ONE_DESC,
1364 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1365 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1366 
1367 	return pos;
1368 }
1369 
1370 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1371 						  char *buf, int len)
1372 {
1373 	struct hclge_rx_com_thrd *rx_com_thrd;
1374 	struct hclge_desc desc[2];
1375 	int pos = 0;
1376 	int i, ret;
1377 
1378 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1379 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1380 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1381 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1382 	if (ret) {
1383 		dev_err(&hdev->pdev->dev,
1384 			"failed to dump rx common threshold, ret = %d\n", ret);
1385 		return ret;
1386 	}
1387 
1388 	pos += scnprintf(buf + pos, len - pos, "\n");
1389 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1390 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1391 		pos += scnprintf(buf + pos, len - pos,
1392 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1393 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1394 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1395 
1396 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1397 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1398 		pos += scnprintf(buf + pos, len - pos,
1399 			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1400 			 i + HCLGE_TC_NUM_ONE_DESC,
1401 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1402 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1403 
1404 	return pos;
1405 }
1406 
1407 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1408 				      int len)
1409 {
1410 	int pos = 0;
1411 	int ret;
1412 
1413 	ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1414 	if (ret < 0)
1415 		return ret;
1416 	pos += ret;
1417 
1418 	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1419 	if (ret < 0)
1420 		return ret;
1421 	pos += ret;
1422 
1423 	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1424 	if (ret < 0)
1425 		return ret;
1426 	pos += ret;
1427 
1428 	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1429 	if (ret < 0)
1430 		return ret;
1431 	pos += ret;
1432 
1433 	pos += scnprintf(buf + pos, len - pos, "\n");
1434 	if (!hnae3_dev_dcb_supported(hdev))
1435 		return 0;
1436 
1437 	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1438 	if (ret < 0)
1439 		return ret;
1440 	pos += ret;
1441 
1442 	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1443 						     len - pos);
1444 	if (ret < 0)
1445 		return ret;
1446 
1447 	return 0;
1448 }
1449 
1450 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1451 {
1452 	struct hclge_mac_ethertype_idx_rd_cmd *req0;
1453 	struct hclge_desc desc;
1454 	u32 msg_egress_port;
1455 	int pos = 0;
1456 	int ret, i;
1457 
1458 	pos += scnprintf(buf + pos, len - pos,
1459 			 "entry  mac_addr          mask  ether  ");
1460 	pos += scnprintf(buf + pos, len - pos,
1461 			 "mask  vlan  mask  i_map  i_dir  e_type  ");
1462 	pos += scnprintf(buf + pos, len - pos, "pf_id  vf_id  q_id  drop\n");
1463 
1464 	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1465 		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1466 					   true);
1467 		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1468 		req0->index = cpu_to_le16(i);
1469 
1470 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1471 		if (ret) {
1472 			dev_err(&hdev->pdev->dev,
1473 				"failed to dump manage table, ret = %d\n", ret);
1474 			return ret;
1475 		}
1476 
1477 		if (!req0->resp_code)
1478 			continue;
1479 
1480 		pos += scnprintf(buf + pos, len - pos, "%02u     %pM ",
1481 				 le16_to_cpu(req0->index), req0->mac_addr);
1482 
1483 		pos += scnprintf(buf + pos, len - pos,
1484 				 "%x     %04x   %x     %04x  ",
1485 				 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1486 				 le16_to_cpu(req0->ethter_type),
1487 				 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1488 				 le16_to_cpu(req0->vlan_tag) &
1489 				 HCLGE_DBG_MNG_VLAN_TAG);
1490 
1491 		pos += scnprintf(buf + pos, len - pos,
1492 				 "%x     %02x     %02x     ",
1493 				 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1494 				 req0->i_port_bitmap, req0->i_port_direction);
1495 
1496 		msg_egress_port = le16_to_cpu(req0->egress_port);
1497 		pos += scnprintf(buf + pos, len - pos,
1498 				 "%x       %x      %02x     %04x  %x\n",
1499 				 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1500 				 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1501 				 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1502 				 le16_to_cpu(req0->egress_queue),
1503 				 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1504 	}
1505 
1506 	return 0;
1507 }
1508 
1509 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1510 
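/* Read one flow director TCAM entry (x key when @sel_x, otherwise y key) at
 * @tcam_msg.loc and format it into @tcam_buf of HCLGE_DBG_TCAM_BUF_SIZE
 * bytes.
 */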
1511 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1512 				  char *tcam_buf,
1513 				  struct hclge_dbg_tcam_msg tcam_msg)
1514 {
1515 	struct hclge_fd_tcam_config_1_cmd *req1;
1516 	struct hclge_fd_tcam_config_2_cmd *req2;
1517 	struct hclge_fd_tcam_config_3_cmd *req3;
1518 	struct hclge_desc desc[3];
1519 	int pos = 0;
1520 	int ret, i;
1521 	u32 *req;
1522 
1523 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1524 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1525 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1526 	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1527 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1528 
1529 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1530 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1531 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1532 
1533 	req1->stage  = tcam_msg.stage;
1534 	req1->xy_sel = sel_x ? 1 : 0;
1535 	req1->index  = cpu_to_le32(tcam_msg.loc);
1536 
1537 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
1538 	if (ret)
1539 		return ret;
1540 
1541 	pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1542 			 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1543 			 tcam_msg.loc);
1544 
1545 	/* tcam_data0 ~ tcam_data1 */
1546 	req = (u32 *)req1->tcam_data;
1547 	for (i = 0; i < 2; i++)
1548 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1549 				 "%08x\n", *req++);
1550 
1551 	/* tcam_data2 ~ tcam_data7 */
1552 	req = (u32 *)req2->tcam_data;
1553 	for (i = 0; i < 6; i++)
1554 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1555 				 "%08x\n", *req++);
1556 
1557 	/* tcam_data8 ~ tcam_data12 */
1558 	req = (u32 *)req3->tcam_data;
1559 	for (i = 0; i < 5; i++)
1560 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1561 				 "%08x\n", *req++);
1562 
1563 	return ret;
1564 }
1565 
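/* Snapshot the locations of the configured flow director rules under
 * fd_rule_lock. Returns the rule count, or -EINVAL if the list is empty or
 * disagrees with hclge_fd_rule_num.
 */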
1566 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1567 {
1568 	struct hclge_fd_rule *rule;
1569 	struct hlist_node *node;
1570 	int cnt = 0;
1571 
1572 	spin_lock_bh(&hdev->fd_rule_lock);
1573 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1574 		rule_locs[cnt] = rule->location;
1575 		cnt++;
1576 	}
1577 	spin_unlock_bh(&hdev->fd_rule_lock);
1578 
1579 	if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1580 		return -EINVAL;
1581 
1582 	return cnt;
1583 }
1584 
1585 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1586 {
1587 	u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1588 	struct hclge_dbg_tcam_msg tcam_msg;
1589 	int i, ret, rule_cnt;
1590 	u16 *rule_locs;
1591 	char *tcam_buf;
1592 	int pos = 0;
1593 
1594 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1595 		dev_err(&hdev->pdev->dev,
1596 			"Only FD-supported dev supports dump fd tcam\n");
1597 		return -EOPNOTSUPP;
1598 	}
1599 
1600 	if (!hdev->hclge_fd_rule_num || !rule_num)
1601 		return 0;
1602 
1603 	rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1604 	if (!rule_locs)
1605 		return -ENOMEM;
1606 
1607 	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1608 	if (!tcam_buf) {
1609 		kfree(rule_locs);
1610 		return -ENOMEM;
1611 	}
1612 
1613 	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1614 	if (rule_cnt < 0) {
1615 		ret = rule_cnt;
1616 		dev_err(&hdev->pdev->dev,
1617 			"failed to get rule number, ret = %d\n", ret);
1618 		goto out;
1619 	}
1620 
1621 	ret = 0;
1622 	for (i = 0; i < rule_cnt; i++) {
1623 		tcam_msg.stage = HCLGE_FD_STAGE_1;
1624 		tcam_msg.loc = rule_locs[i];
1625 
1626 		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1627 		if (ret) {
1628 			dev_err(&hdev->pdev->dev,
1629 				"failed to get fd tcam key x, ret = %d\n", ret);
1630 			goto out;
1631 		}
1632 
1633 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1634 
1635 		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1636 		if (ret) {
1637 			dev_err(&hdev->pdev->dev,
1638 				"failed to get fd tcam key y, ret = %d\n", ret);
1639 			goto out;
1640 		}
1641 
1642 		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1643 	}
1644 
1645 out:
1646 	kfree(tcam_buf);
1647 	kfree(rule_locs);
1648 	return ret;
1649 }
1650 
1651 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1652 {
1653 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1654 	struct hclge_fd_ad_cnt_read_cmd *req;
1655 	char str_id[HCLGE_DBG_ID_LEN];
1656 	struct hclge_desc desc;
1657 	int pos = 0;
1658 	int ret;
1659 	u64 cnt;
1660 	u8 i;
1661 
1662 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
1663 		return -EOPNOTSUPP;
1664 
1665 	pos += scnprintf(buf + pos, len - pos,
1666 			 "func_id\thit_times\n");
1667 
1668 	for (i = 0; i < func_num; i++) {
1669 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1670 		req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1671 		req->index = cpu_to_le16(i);
1672 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1673 		if (ret) {
1674 			dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1675 				ret);
1676 			return ret;
1677 		}
1678 		cnt = le64_to_cpu(req->cnt);
1679 		hclge_dbg_get_func_id_str(str_id, i);
1680 		pos += scnprintf(buf + pos, len - pos,
1681 				 "%s\t%llu\n", str_id, cnt);
1682 	}
1683 
1684 	return 0;
1685 }
1686 
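/* Registers dumped by the reset info command in addition to the reset
 * counters.
 */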
1687 static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
1688 	{HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
1689 	{HCLGE_MISC_RESET_STS_REG,   "reset interrupt source"},
1690 	{HCLGE_MISC_VECTOR_INT_STS,  "reset interrupt status"},
1691 	{HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
1692 	{HCLGE_GLOBAL_RESET_REG,  "hardware reset status"},
1693 	{HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
1694 	{HCLGE_FUN_RST_ING, "function reset status"}
1695 };
1696 
1697 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1698 {
1699 	u32 i, offset;
1700 	int pos = 0;
1701 
1702 	pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1703 			 hdev->rst_stats.pf_rst_cnt);
1704 	pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1705 			 hdev->rst_stats.flr_rst_cnt);
1706 	pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1707 			 hdev->rst_stats.global_rst_cnt);
1708 	pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1709 			 hdev->rst_stats.imp_rst_cnt);
1710 	pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1711 			 hdev->rst_stats.reset_done_cnt);
1712 	pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1713 			 hdev->rst_stats.hw_reset_done_cnt);
1714 	pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1715 			 hdev->rst_stats.reset_cnt);
1716 	pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1717 			 hdev->rst_stats.reset_fail_cnt);
1718 
1719 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
1720 		offset = hclge_dbg_rst_info[i].offset;
1721 		pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
1722 				 hclge_dbg_rst_info[i].message,
1723 				 hclge_read_dev(&hdev->hw, offset));
1724 	}
1725 
1726 	pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1727 			 hdev->state);
1728 
1729 	return 0;
1730 }
1731 
1732 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1733 {
1734 	unsigned long rem_nsec;
1735 	int pos = 0;
1736 	u64 lc;
1737 
1738 	lc = local_clock();
1739 	rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1740 
1741 	pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1742 			 (unsigned long)lc, rem_nsec / 1000);
1743 	pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1744 			 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1745 	pos += scnprintf(buf + pos, len - pos,
1746 			 "last_service_task_processed: %lu(jiffies)\n",
1747 			 hdev->last_serv_processed);
1748 	pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1749 			 hdev->serv_processed_cnt);
1750 
1751 	return 0;
1752 }
1753 
1754 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1755 {
1756 	int pos = 0;
1757 
1758 	pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1759 			 hdev->num_nic_msi);
1760 	pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1761 			 hdev->num_roce_msi);
1762 	pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1763 			 hdev->num_msi_used);
1764 	pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1765 			 hdev->num_msi_left);
1766 
1767 	return 0;
1768 }
1769 
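/* Print the IMP statistics payload as "offset | data" rows, two 32-bit
 * words per row, walking all @bd_num descriptors.
 */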
1770 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1771 					  char *buf, int len, u32 bd_num)
1772 {
1773 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1774 
1775 	struct hclge_desc *desc_index = desc_src;
1776 	u32 offset = 0;
1777 	int pos = 0;
1778 	u32 i, j;
1779 
1780 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1781 
1782 	for (i = 0; i < bd_num; i++) {
1783 		j = 0;
1784 		while (j < HCLGE_DESC_DATA_LEN - 1) {
1785 			pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1786 					 offset);
1787 			pos += scnprintf(buf + pos, len - pos, "0x%08x  ",
1788 					 le32_to_cpu(desc_index->data[j++]));
1789 			pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1790 					 le32_to_cpu(desc_index->data[j++]));
1791 			offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1792 		}
1793 		desc_index++;
1794 	}
1795 }
1796 
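/* hclge_dbg_get_imp_stats_info: query how many descriptors the IMP statistics
 * need, read them from firmware and print the raw data.
 */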
1797 static int
1798 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1799 {
1800 	struct hclge_get_imp_bd_cmd *req;
1801 	struct hclge_desc *desc_src;
1802 	struct hclge_desc desc;
1803 	u32 bd_num;
1804 	int ret;
1805 
1806 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1807 
1808 	req = (struct hclge_get_imp_bd_cmd *)desc.data;
1809 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1810 	if (ret) {
1811 		dev_err(&hdev->pdev->dev,
1812 			"failed to get imp statistics bd number, ret = %d\n",
1813 			ret);
1814 		return ret;
1815 	}
1816 
1817 	bd_num = le32_to_cpu(req->bd_num);
1818 	if (!bd_num) {
1819 		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1820 		return -EINVAL;
1821 	}
1822 
1823 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1824 	if (!desc_src)
1825 		return -ENOMEM;
1826 
	ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
				 HCLGE_OPC_IMP_STATS_INFO);
1829 	if (ret) {
1830 		kfree(desc_src);
1831 		dev_err(&hdev->pdev->dev,
1832 			"failed to get imp statistics, ret = %d\n", ret);
1833 		return ret;
1834 	}
1835 
1836 	hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1837 
1838 	kfree(desc_src);
1839 
1840 	return 0;
1841 }
1842 
1843 #define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
1844 #define HCLGE_MAX_NCL_CONFIG_LENGTH	16384
1845 
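/* hclge_ncl_config_data_print: print one batch of NCL config data, advancing
 * the output offset and decreasing the remaining length. The first data word
 * of the first descriptor carries the request written by the caller and is
 * skipped.
 */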
1846 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1847 					char *buf, int len, int *pos)
1848 {
1849 #define HCLGE_CMD_DATA_NUM		6
1850 
1851 	int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1852 	int i, j;
1853 
1854 	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1855 		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1856 			if (i == 0 && j == 0)
1857 				continue;
1858 
1859 			*pos += scnprintf(buf + *pos, len - *pos,
1860 					  "0x%04x | 0x%08x\n", offset,
1861 					  le32_to_cpu(desc[i].data[j]));
1862 
1863 			offset += sizeof(u32);
1864 			*index -= sizeof(u32);
1865 
1866 			if (*index <= 0)
1867 				return;
1868 		}
1869 	}
1870 }
1871 
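/* hclge_dbg_dump_ncl_config: dump up to HCLGE_MAX_NCL_CONFIG_LENGTH bytes of
 * NCL config data. Each query reads at most
 * HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes; the requested offset and length
 * are packed into the first command data word.
 */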
1872 static int
1873 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1874 {
1875 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
1876 
1877 	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1878 	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1879 	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1880 	int pos = 0;
1881 	u32 data0;
1882 	int ret;
1883 
1884 	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1885 
1886 	while (index > 0) {
1887 		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1888 		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1889 			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1890 		else
1891 			data0 |= (u32)index << 16;
1892 		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1893 					 HCLGE_OPC_QUERY_NCL_CONFIG);
1894 		if (ret)
1895 			return ret;
1896 
1897 		hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
1898 	}
1899 
1900 	return 0;
1901 }
1902 
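/* hclge_dbg_dump_loopback: dump the loopback states of the MAC: app loopback,
 * serdes serial/parallel loopback and, when a PHY is present or driven by the
 * IMP, the phy loopback state.
 */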
1903 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1904 {
1905 	struct phy_device *phydev = hdev->hw.mac.phydev;
1906 	struct hclge_config_mac_mode_cmd *req_app;
1907 	struct hclge_common_lb_cmd *req_common;
1908 	struct hclge_desc desc;
1909 	u8 loopback_en;
1910 	int pos = 0;
1911 	int ret;
1912 
1913 	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1914 	req_common = (struct hclge_common_lb_cmd *)desc.data;
1915 
1916 	pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1917 			 hdev->hw.mac.mac_id);
1918 
1919 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1920 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1921 	if (ret) {
1922 		dev_err(&hdev->pdev->dev,
1923 			"failed to dump app loopback status, ret = %d\n", ret);
1924 		return ret;
1925 	}
1926 
1927 	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1928 				    HCLGE_MAC_APP_LP_B);
1929 	pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1930 			 state_str[loopback_en]);
1931 
1932 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1933 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1934 	if (ret) {
1935 		dev_err(&hdev->pdev->dev,
1936 			"failed to dump common loopback status, ret = %d\n",
1937 			ret);
1938 		return ret;
1939 	}
1940 
	loopback_en = req_common->enable &
			HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B ? 1 : 0;
1942 	pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1943 			 state_str[loopback_en]);
1944 
1945 	loopback_en = req_common->enable &
1946 			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1947 	pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1948 			 state_str[loopback_en]);
1949 
1950 	if (phydev) {
1951 		loopback_en = phydev->loopback_enabled;
1952 		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1953 				 state_str[loopback_en]);
1954 	} else if (hnae3_dev_phy_imp_supported(hdev)) {
		loopback_en = req_common->enable &
			      HCLGE_CMD_GE_PHY_INNER_LOOP_B ? 1 : 0;
1957 		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1958 				 state_str[loopback_en]);
1959 	}
1960 
1961 	return 0;
1962 }
1963 
/* hclge_dbg_dump_mac_tnl_status: print messages about mac tnl interrupts
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer used to store the dumped messages
 * @len: length of the buffer
 */
1967 static int
1968 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1969 {
1970 	struct hclge_mac_tnl_stats stats;
1971 	unsigned long rem_nsec;
1972 	int pos = 0;
1973 
1974 	pos += scnprintf(buf + pos, len - pos,
			 "Recently generated mac tnl interrupts:\n");
1976 
1977 	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1978 		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1979 
1980 		pos += scnprintf(buf + pos, len - pos,
1981 				 "[%07lu.%03lu] status = 0x%x\n",
1982 				 (unsigned long)stats.time, rem_nsec / 1000,
1983 				 stats.status);
1984 	}
1985 
1986 	return 0;
1987 }
1988 
1990 static const struct hclge_dbg_item mac_list_items[] = {
1991 	{ "FUNC_ID", 2 },
1992 	{ "MAC_ADDR", 12 },
1993 	{ "STATE", 2 },
1994 };
1995 
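/* hclge_dbg_dump_mac_list: dump the unicast or multicast MAC list of every
 * vport as a table of function id, MAC address and node state.
 */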
1996 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
1997 				    bool is_unicast)
1998 {
1999 	char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
2000 	char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2001 	char *result[ARRAY_SIZE(mac_list_items)];
2002 	struct hclge_mac_node *mac_node, *tmp;
2003 	struct hclge_vport *vport;
2004 	struct list_head *list;
2005 	u32 func_id;
2006 	int pos = 0;
2007 	int i;
2008 
2009 	for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
2010 		result[i] = &data_str[i][0];
2011 
2012 	pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
2013 			 is_unicast ? "UC" : "MC");
2014 	hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
2015 			       NULL, ARRAY_SIZE(mac_list_items));
2016 	pos += scnprintf(buf + pos, len - pos, "%s", content);
2017 
2018 	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
2019 		vport = &hdev->vport[func_id];
2020 		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
2021 		spin_lock_bh(&vport->mac_list_lock);
2022 		list_for_each_entry_safe(mac_node, tmp, list, node) {
2023 			i = 0;
2024 			result[i++] = hclge_dbg_get_func_id_str(str_id,
2025 								func_id);
2026 			sprintf(result[i++], "%pM", mac_node->mac_addr);
2027 			sprintf(result[i++], "%5s",
2028 				hclge_mac_state_str[mac_node->state]);
2029 			hclge_dbg_fill_content(content, sizeof(content),
2030 					       mac_list_items,
2031 					       (const char **)result,
2032 					       ARRAY_SIZE(mac_list_items));
2033 			pos += scnprintf(buf + pos, len - pos, "%s", content);
2034 		}
2035 		spin_unlock_bh(&vport->mac_list_lock);
2036 	}
2037 }
2038 
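/* hclge_dbg_dump_umv_info: dump the UMV space accounting: the configured
 * sizes, the shared space and the entries used by the PF and each active VF,
 * plus the number of multicast MAC addresses in use.
 */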
2039 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
2040 {
2041 	u8 func_num = pci_num_vf(hdev->pdev) + 1;
2042 	struct hclge_vport *vport;
2043 	int pos = 0;
2044 	u8 i;
2045 
	pos += scnprintf(buf + pos, len - pos, "num_alloc_vport   : %u\n",
			 hdev->num_alloc_vport);
2048 	pos += scnprintf(buf + pos, len - pos, "max_umv_size     : %u\n",
2049 			 hdev->max_umv_size);
2050 	pos += scnprintf(buf + pos, len - pos, "wanted_umv_size  : %u\n",
2051 			 hdev->wanted_umv_size);
2052 	pos += scnprintf(buf + pos, len - pos, "priv_umv_size    : %u\n",
2053 			 hdev->priv_umv_size);
2054 
2055 	mutex_lock(&hdev->vport_lock);
2056 	pos += scnprintf(buf + pos, len - pos, "share_umv_size   : %u\n",
2057 			 hdev->share_umv_size);
2058 	for (i = 0; i < func_num; i++) {
2059 		vport = &hdev->vport[i];
2060 		pos += scnprintf(buf + pos, len - pos,
2061 				 "vport(%u) used_umv_num : %u\n",
2062 				 i, vport->used_umv_num);
2063 	}
2064 	mutex_unlock(&hdev->vport_lock);
2065 
2066 	pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num  : %u\n",
2067 			 hdev->used_mc_mac_num);
2068 
2069 	return 0;
2070 }
2071 
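/* hclge_get_vlan_rx_offload_cfg: read the Rx VLAN offload configuration of a
 * function: the strip, drop and priority-only bits for VLAN tag1 and tag2.
 */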
2072 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2073 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2074 {
2075 	struct hclge_vport_vtag_rx_cfg_cmd *req;
2076 	struct hclge_desc desc;
2077 	u16 bmap_index;
2078 	u8 rx_cfg;
2079 	int ret;
2080 
2081 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2082 
2083 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
2084 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2085 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2086 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2087 
2088 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2089 	if (ret) {
2090 		dev_err(&hdev->pdev->dev,
2091 			"failed to get vport%u rxvlan cfg, ret = %d\n",
2092 			vf_id, ret);
2093 		return ret;
2094 	}
2095 
2096 	rx_cfg = req->vport_vlan_cfg;
2097 	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2098 	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2099 	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2100 	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2101 	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2102 	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2103 
2104 	return 0;
2105 }
2106 
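/* hclge_get_vlan_tx_offload_cfg: read the Tx VLAN offload configuration of a
 * function: the PVID and the accept, insert and shift bits for VLAN tag1 and
 * tag2.
 */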
2107 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2108 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2109 {
2110 	struct hclge_vport_vtag_tx_cfg_cmd *req;
2111 	struct hclge_desc desc;
2112 	u16 bmap_index;
2113 	u8 tx_cfg;
2114 	int ret;
2115 
2116 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2117 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2118 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2119 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2120 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2121 
2122 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2123 	if (ret) {
2124 		dev_err(&hdev->pdev->dev,
2125 			"failed to get vport%u txvlan cfg, ret = %d\n",
2126 			vf_id, ret);
2127 		return ret;
2128 	}
2129 
2130 	tx_cfg = req->vport_vlan_cfg;
2131 	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2132 
2133 	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2134 	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2135 	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2136 	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2137 	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2138 	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2139 	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2140 
2141 	return 0;
2142 }
2143 
2144 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2145 					    u8 vlan_type, u8 vf_id,
2146 					    struct hclge_desc *desc)
2147 {
2148 	struct hclge_vlan_filter_ctrl_cmd *req;
2149 	int ret;
2150 
2151 	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2152 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2153 	req->vlan_type = vlan_type;
2154 	req->vf_id = vf_id;
2155 
2156 	ret = hclge_cmd_send(&hdev->hw, desc, 1);
2157 	if (ret)
2158 		dev_err(&hdev->pdev->dev,
2159 			"failed to get vport%u vlan filter config, ret = %d.\n",
2160 			vf_id, ret);
2161 
2162 	return ret;
2163 }
2164 
2165 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2166 				       u8 vf_id, u8 *vlan_fe)
2167 {
2168 	struct hclge_vlan_filter_ctrl_cmd *req;
2169 	struct hclge_desc desc;
2170 	int ret;
2171 
2172 	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2173 	if (ret)
2174 		return ret;
2175 
2176 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2177 	*vlan_fe = req->vlan_fe;
2178 
2179 	return 0;
2180 }
2181 
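/* hclge_get_port_vlan_filter_bypass_state: query whether port VLAN filter
 * bypass is enabled for a function. Returns 0 without touching *bypass_en
 * when the device does not support the feature.
 */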
2182 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2183 						   u8 vf_id, u8 *bypass_en)
2184 {
2185 	struct hclge_port_vlan_filter_bypass_cmd *req;
2186 	struct hclge_desc desc;
2187 	int ret;
2188 
2189 	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2190 		return 0;
2191 
2192 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2193 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2194 	req->vf_id = vf_id;
2195 
2196 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2197 	if (ret) {
2198 		dev_err(&hdev->pdev->dev,
2199 			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2200 			vf_id, ret);
2201 		return ret;
2202 	}
2203 
2204 	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2205 
2206 	return 0;
2207 }
2208 
2209 static const struct hclge_dbg_item vlan_filter_items[] = {
2210 	{ "FUNC_ID", 2 },
2211 	{ "I_VF_VLAN_FILTER", 2 },
2212 	{ "E_VF_VLAN_FILTER", 2 },
2213 	{ "PORT_VLAN_FILTER_BYPASS", 0 }
2214 };
2215 
2216 static const struct hclge_dbg_item vlan_offload_items[] = {
2217 	{ "FUNC_ID", 2 },
2218 	{ "PVID", 4 },
2219 	{ "ACCEPT_TAG1", 2 },
2220 	{ "ACCEPT_TAG2", 2 },
2221 	{ "ACCEPT_UNTAG1", 2 },
2222 	{ "ACCEPT_UNTAG2", 2 },
2223 	{ "INSERT_TAG1", 2 },
2224 	{ "INSERT_TAG2", 2 },
2225 	{ "SHIFT_TAG", 2 },
2226 	{ "STRIP_TAG1", 2 },
2227 	{ "STRIP_TAG2", 2 },
2228 	{ "DROP_TAG1", 2 },
2229 	{ "DROP_TAG2", 2 },
2230 	{ "PRI_ONLY_TAG1", 2 },
2231 	{ "PRI_ONLY_TAG2", 0 }
2232 };
2233 
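/* hclge_dbg_dump_vlan_filter_config: dump the port level ingress/egress VLAN
 * filter switches, followed by a table with the per-function filter and
 * bypass states.
 */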
2234 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2235 					     int len, int *pos)
2236 {
2237 	char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2238 	const char *result[ARRAY_SIZE(vlan_filter_items)];
2239 	u8 i, j, vlan_fe, bypass, ingress, egress;
2240 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2241 	int ret;
2242 
2243 	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2244 					  &vlan_fe);
2245 	if (ret)
2246 		return ret;
2247 	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2248 	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2249 
	*pos += scnprintf(buf + *pos, len - *pos, "I_PORT_VLAN_FILTER: %s\n",
			  state_str[ingress]);
2252 	*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2253 			  state_str[egress]);
2254 
2255 	hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2256 			       NULL, ARRAY_SIZE(vlan_filter_items));
2257 	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2258 
2259 	for (i = 0; i < func_num; i++) {
2260 		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2261 						  &vlan_fe);
2262 		if (ret)
2263 			return ret;
2264 
2265 		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2266 		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2267 		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2268 		if (ret)
2269 			return ret;
2270 		j = 0;
2271 		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2272 		result[j++] = state_str[ingress];
2273 		result[j++] = state_str[egress];
2274 		result[j++] =
2275 			test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2276 				 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2277 		hclge_dbg_fill_content(content, sizeof(content),
2278 				       vlan_filter_items, result,
2279 				       ARRAY_SIZE(vlan_filter_items));
2280 		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2281 	}
2282 	*pos += scnprintf(buf + *pos, len - *pos, "\n");
2283 
2284 	return 0;
2285 }
2286 
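/* hclge_dbg_dump_vlan_offload_config: dump the VLAN offload configuration of
 * the PF and each active VF as a table built from the Tx and Rx offload
 * query commands above.
 */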
2287 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2288 					      int len, int *pos)
2289 {
2290 	char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2291 	const char *result[ARRAY_SIZE(vlan_offload_items)];
2292 	char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2293 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2294 	struct hclge_dbg_vlan_cfg vlan_cfg;
2295 	int ret;
2296 	u8 i, j;
2297 
2298 	hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2299 			       NULL, ARRAY_SIZE(vlan_offload_items));
2300 	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2301 
2302 	for (i = 0; i < func_num; i++) {
2303 		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2304 		if (ret)
2305 			return ret;
2306 
2307 		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2308 		if (ret)
2309 			return ret;
2310 
2311 		sprintf(str_pvid, "%u", vlan_cfg.pvid);
2312 		j = 0;
2313 		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2314 		result[j++] = str_pvid;
2315 		result[j++] = state_str[vlan_cfg.accept_tag1];
2316 		result[j++] = state_str[vlan_cfg.accept_tag2];
2317 		result[j++] = state_str[vlan_cfg.accept_untag1];
2318 		result[j++] = state_str[vlan_cfg.accept_untag2];
2319 		result[j++] = state_str[vlan_cfg.insert_tag1];
2320 		result[j++] = state_str[vlan_cfg.insert_tag2];
2321 		result[j++] = state_str[vlan_cfg.shift_tag];
2322 		result[j++] = state_str[vlan_cfg.strip_tag1];
2323 		result[j++] = state_str[vlan_cfg.strip_tag2];
2324 		result[j++] = state_str[vlan_cfg.drop_tag1];
2325 		result[j++] = state_str[vlan_cfg.drop_tag2];
2326 		result[j++] = state_str[vlan_cfg.pri_only1];
2327 		result[j++] = state_str[vlan_cfg.pri_only2];
2328 
2329 		hclge_dbg_fill_content(content, sizeof(content),
2330 				       vlan_offload_items, result,
2331 				       ARRAY_SIZE(vlan_offload_items));
2332 		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2333 	}
2334 
2335 	return 0;
2336 }
2337 
2338 static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
2339 				      int len)
2340 {
2341 	int pos = 0;
2342 	int ret;
2343 
2344 	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
2345 	if (ret)
2346 		return ret;
2347 
2348 	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
2349 }
2350 
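/* hclge_dbg_dump_ptp_info: dump the PTP state: enable flags, last Rx/Tx
 * timestamps and counters, the software and hardware configuration words and
 * the current timestamping setup.
 */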
2351 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2352 {
2353 	struct hclge_ptp *ptp = hdev->ptp;
2354 	u32 sw_cfg = ptp->ptp_cfg;
2355 	unsigned int tx_start;
2356 	unsigned int last_rx;
2357 	int pos = 0;
2358 	u32 hw_cfg;
2359 	int ret;
2360 
2361 	pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2362 			 ptp->info.name);
2363 	pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2364 			 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2365 			 "yes" : "no");
2366 	pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2367 			 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2368 			 "yes" : "no");
2369 	pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2370 			 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2371 			 "yes" : "no");
2372 
2373 	last_rx = jiffies_to_msecs(ptp->last_rx);
2374 	pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
2375 			 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2376 	pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2377 
2378 	tx_start = jiffies_to_msecs(ptp->tx_start);
2379 	pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
2380 			 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2381 	pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2382 	pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2383 			 ptp->tx_skipped);
2384 	pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2385 			 ptp->tx_timeout);
2386 	pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2387 			 ptp->last_tx_seqid);
2388 
2389 	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2390 	if (ret)
2391 		return ret;
2392 
2393 	pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2394 			 sw_cfg, hw_cfg);
2395 
2396 	pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2397 			 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2398 
2399 	return 0;
2400 }
2401 
2402 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2403 {
2404 	hclge_dbg_dump_mac_list(hdev, buf, len, true);
2405 
2406 	return 0;
2407 }
2408 
2409 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2410 {
2411 	hclge_dbg_dump_mac_list(hdev, buf, len, false);
2412 
2413 	return 0;
2414 }
2415 
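/* Map each debugfs command to its dump handler. The register dump commands
 * need the command id and use dbg_dump_reg; all other commands use dbg_dump.
 */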
2416 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2417 	{
2418 		.cmd = HNAE3_DBG_CMD_TM_NODES,
2419 		.dbg_dump = hclge_dbg_dump_tm_nodes,
2420 	},
2421 	{
2422 		.cmd = HNAE3_DBG_CMD_TM_PRI,
2423 		.dbg_dump = hclge_dbg_dump_tm_pri,
2424 	},
2425 	{
2426 		.cmd = HNAE3_DBG_CMD_TM_QSET,
2427 		.dbg_dump = hclge_dbg_dump_tm_qset,
2428 	},
2429 	{
2430 		.cmd = HNAE3_DBG_CMD_TM_MAP,
2431 		.dbg_dump = hclge_dbg_dump_tm_map,
2432 	},
2433 	{
2434 		.cmd = HNAE3_DBG_CMD_TM_PG,
2435 		.dbg_dump = hclge_dbg_dump_tm_pg,
2436 	},
2437 	{
2438 		.cmd = HNAE3_DBG_CMD_TM_PORT,
2439 		.dbg_dump = hclge_dbg_dump_tm_port,
2440 	},
2441 	{
2442 		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2443 		.dbg_dump = hclge_dbg_dump_tc,
2444 	},
2445 	{
2446 		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2447 		.dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2448 	},
2449 	{
2450 		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2451 		.dbg_dump = hclge_dbg_dump_qos_pri_map,
2452 	},
2453 	{
2454 		.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
2455 		.dbg_dump = hclge_dbg_dump_qos_dscp_map,
2456 	},
2457 	{
2458 		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2459 		.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2460 	},
2461 	{
2462 		.cmd = HNAE3_DBG_CMD_MAC_UC,
2463 		.dbg_dump = hclge_dbg_dump_mac_uc,
2464 	},
2465 	{
2466 		.cmd = HNAE3_DBG_CMD_MAC_MC,
2467 		.dbg_dump = hclge_dbg_dump_mac_mc,
2468 	},
2469 	{
2470 		.cmd = HNAE3_DBG_CMD_MNG_TBL,
2471 		.dbg_dump = hclge_dbg_dump_mng_table,
2472 	},
2473 	{
2474 		.cmd = HNAE3_DBG_CMD_LOOPBACK,
2475 		.dbg_dump = hclge_dbg_dump_loopback,
2476 	},
2477 	{
2478 		.cmd = HNAE3_DBG_CMD_PTP_INFO,
2479 		.dbg_dump = hclge_dbg_dump_ptp_info,
2480 	},
2481 	{
2482 		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2483 		.dbg_dump = hclge_dbg_dump_interrupt,
2484 	},
2485 	{
2486 		.cmd = HNAE3_DBG_CMD_RESET_INFO,
2487 		.dbg_dump = hclge_dbg_dump_rst_info,
2488 	},
2489 	{
2490 		.cmd = HNAE3_DBG_CMD_IMP_INFO,
2491 		.dbg_dump = hclge_dbg_get_imp_stats_info,
2492 	},
2493 	{
2494 		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2495 		.dbg_dump = hclge_dbg_dump_ncl_config,
2496 	},
2497 	{
2498 		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2499 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2500 	},
2501 	{
2502 		.cmd = HNAE3_DBG_CMD_REG_SSU,
2503 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2504 	},
2505 	{
2506 		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2507 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2508 	},
2509 	{
2510 		.cmd = HNAE3_DBG_CMD_REG_RPU,
2511 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2512 	},
2513 	{
2514 		.cmd = HNAE3_DBG_CMD_REG_NCSI,
2515 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2516 	},
2517 	{
2518 		.cmd = HNAE3_DBG_CMD_REG_RTC,
2519 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2520 	},
2521 	{
2522 		.cmd = HNAE3_DBG_CMD_REG_PPP,
2523 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2524 	},
2525 	{
2526 		.cmd = HNAE3_DBG_CMD_REG_RCB,
2527 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2528 	},
2529 	{
2530 		.cmd = HNAE3_DBG_CMD_REG_TQP,
2531 		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2532 	},
2533 	{
2534 		.cmd = HNAE3_DBG_CMD_REG_MAC,
2535 		.dbg_dump = hclge_dbg_dump_mac,
2536 	},
2537 	{
2538 		.cmd = HNAE3_DBG_CMD_REG_DCB,
2539 		.dbg_dump = hclge_dbg_dump_dcb,
2540 	},
2541 	{
2542 		.cmd = HNAE3_DBG_CMD_FD_TCAM,
2543 		.dbg_dump = hclge_dbg_dump_fd_tcam,
2544 	},
2545 	{
2546 		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2547 		.dbg_dump = hclge_dbg_dump_mac_tnl_status,
2548 	},
2549 	{
2550 		.cmd = HNAE3_DBG_CMD_SERV_INFO,
2551 		.dbg_dump = hclge_dbg_dump_serv_info,
2552 	},
2553 	{
2554 		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2555 		.dbg_dump = hclge_dbg_dump_vlan_config,
2556 	},
2557 	{
2558 		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
2559 		.dbg_dump = hclge_dbg_dump_fd_counter,
2560 	},
2561 	{
2562 		.cmd = HNAE3_DBG_CMD_UMV_INFO,
2563 		.dbg_dump = hclge_dbg_dump_umv_info,
2564 	},
2565 };
2566 
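/* hclge_dbg_read_cmd: debugfs read entry of the PF driver, look up the
 * handler registered for @cmd in hclge_dbg_cmd_func and let it fill @buf.
 */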
2567 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2568 		       char *buf, int len)
2569 {
2570 	struct hclge_vport *vport = hclge_get_vport(handle);
2571 	const struct hclge_dbg_func *cmd_func;
2572 	struct hclge_dev *hdev = vport->back;
2573 	u32 i;
2574 
2575 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2576 		if (cmd == hclge_dbg_cmd_func[i].cmd) {
2577 			cmd_func = &hclge_dbg_cmd_func[i];
2578 			if (cmd_func->dbg_dump)
2579 				return cmd_func->dbg_dump(hdev, buf, len);
2580 			else
2581 				return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2582 							      len);
2583 		}
2584 	}
2585 
2586 	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2587 	return -EINVAL;
2588 }
2589