// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */

#include <linux/device.h>

#include "hclge_debugfs.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

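/* Lookup table mapping each "dump reg" sub-command to its DFX message
 * descriptions, the offset used to query how many buffer descriptors (BDs)
 * the firmware needs for the block, and the opcode used to read it.
 */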
static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
	{ .reg_type = "bios common",
	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
	{ .reg_type = "igu egu",
	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
	{ .reg_type = "ncsi",
	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
	{ .reg_type = "rtc",
	  .dfx_msg = &hclge_dbg_rtc_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
	{ .reg_type = "ppp",
	  .dfx_msg = &hclge_dbg_ppp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
	{ .reg_type = "rcb",
	  .dfx_msg = &hclge_dbg_rcb_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
	{ .reg_type = "tqp",
	  .dfx_msg = &hclge_dbg_tqp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
};

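/* hclge_dbg_get_dfx_bd_num: query how many buffer descriptors the firmware
 * needs for the DFX register block selected by @offset. The BD numbers for
 * all blocks are returned in one query; @offset indexes the entry of
 * interest within the returned descriptors.
 */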
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
#define HCLGE_GET_DFX_REG_TYPE_CNT	4

	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int entries_per_desc;
	int index;
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get dfx bd num fail, ret = %d\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	index = offset % entries_per_desc;
	return (int)le32_to_cpu(desc[offset / entries_per_desc].data[index]);
}

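/* hclge_dbg_cmd_send: build a chain of @bd_num descriptors for @cmd, store
 * @index in the first descriptor's data[0] as the query parameter, then send
 * the chain to firmware. Every descriptor except the last carries the NEXT
 * flag.
 */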
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
			      struct hclge_desc *desc_src,
			      int index, int bd_num,
			      enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int ret, i;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	desc->data[0] = cpu_to_le32(index);

	for (i = 1; i < bd_num; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
	return ret;
}

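/* hclge_dbg_dump_reg_common: dump one DFX register block to the kernel log.
 * @cmd_buf holds the text after "dump reg", e.g. "ssu 1": an optional index
 * follows the block name and defaults to 0 when absent or unparsable.
 */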
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
				      struct hclge_dbg_reg_type_info *reg_info,
				      const char *cmd_buf)
{
#define IDX_OFFSET	1

	const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
	struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
	struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
	struct hclge_desc *desc_src;
	struct hclge_desc *desc;
	int entries_per_desc;
	int bd_num, buf_len;
	u32 index = 0;
	int min_num;
	int ret, i;

	if (*s) {
		ret = kstrtouint(s, 0, &index);
		index = (ret != 0) ? 0 : index;
	}

	bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
	if (bd_num <= 0) {
		dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
			reg_msg->offset, bd_num);
		return;
	}

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
		return;
	}

	desc = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
	if (ret) {
		kfree(desc_src);
		return;
	}

	entries_per_desc = ARRAY_SIZE(desc->data);
	min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);

	desc = desc_src;
	for (i = 0; i < min_num; i++) {
		if (i > 0 && (i % entries_per_desc) == 0)
			desc++;
		if (dfx_message->flag)
			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
				 dfx_message->message,
				 le32_to_cpu(desc->data[i % entries_per_desc]));

		dfx_message++;
	}

	kfree(desc_src);
}

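/* hclge_dbg_dump_dcb: dump DCB scheduler status for the six IDs given in
 * @cmd_buf, in the order "port pri pg rq nq qset", e.g. "0 0 0 0 0 0".
 */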
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_dbg_bitmap_cmd *bitmap;
	int rq_id, pri_id, qset_id;
	int port_id, nq_id, pg_id;
	struct hclge_desc desc[2];
	int cnt, ret;

	cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
		     &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
	if (cnt != 6) {
		dev_err(dev, "dump dcb: bad command parameter, cnt=%d\n", cnt);
		return;
	}

	ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
				 HCLGE_OPC_QSET_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
	dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
	dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);

	ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);

	ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_PORT_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);

	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
	if (ret)
		return;

	dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));

	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
	if (ret)
		return;

	dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));

	ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
	if (ret)
		return;

	dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
	dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
		 le32_to_cpu(desc[0].data[3]));
	dev_info(dev, "tx_private_waterline: 0x%x\n",
		 le32_to_cpu(desc[0].data[4]));
	dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
	dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
	dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_TM_INTERNAL_CNT);
	if (ret)
		return;

	dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_TM_INTERNAL_STS_1);
	if (ret)
		return;

	dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
	dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
	dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
		 le32_to_cpu(desc[0].data[4]));
	dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
		 le32_to_cpu(desc[0].data[5]));
}

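/* hclge_dbg_dump_reg_cmd: dispatch the "dump reg" sub-commands. Every entry
 * in hclge_dbg_reg_info whose reg_type prefixes @cmd_buf is dumped, so e.g.
 * "ssu" dumps all three ssu register blocks.
 */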
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
	struct hclge_dbg_reg_type_info *reg_info;
	bool has_dump = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
		reg_info = &hclge_dbg_reg_info[i];
		if (!strncmp(cmd_buf, reg_info->reg_type,
			     strlen(reg_info->reg_type))) {
			hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
			has_dump = true;
		}
	}

	if (strncmp(cmd_buf, "dcb", 3) == 0) {
		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
		has_dump = true;
	}

	if (!has_dump)
		dev_info(&hdev->pdev->dev, "unknown command\n");
}

static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
				  char *title_buf, char *true_buf,
				  char *false_buf)
{
	dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
		 flag ? true_buf : false_buf);
}

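/* hclge_dbg_dump_tc: dump the ETS weight of every TC. A weight of zero is
 * reported as "sp mode" (strict priority), a non-zero weight as
 * "no sp mode".
 */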
static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
{
	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i, ret;

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tc\n");
		return;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
		return;
	}

	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "dump tc\n");
	dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
		 ets_weight->weight_offset);

	for (i = 0; i < HNAE3_MAX_TC; i++)
		hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
				      "tc", "no sp mode", "sp mode");
}

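/* hclge_dbg_dump_tm_pg: dump the PG and port shaping parameters and the PG,
 * PRI and QS scheduling modes; on DCB-capable devices also dump the
 * TC-to-qset back-pressure mapping.
 */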
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));

	cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));

	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
		 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));

	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");
		return;
	}

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
		 bp_to_qs_map_cmd->tc_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
		 bp_to_qs_map_cmd->qs_group_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
		 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
	return;

err_tm_pg_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
		cmd, ret);
}

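/* hclge_dbg_dump_tm: dump the TM (traffic management) link mappings and DWRR
 * weights at PG, QS and PRI level, plus the two PRI shaping parameter sets
 * (C and P), then chain into hclge_dbg_dump_tm_pg().
 */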
static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
	struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_pg_weight_cmd *pg_weight;
	struct hclge_qs_weight_cmd *qs_weight;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump tm\n");
	dev_info(&hdev->pdev->dev, "PG_TO_PRI pg_id: %u\n",
		 pg_to_pri_map->pg_id);
	dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
		 pg_to_pri_map->pri_bit_map);

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
		 le16_to_cpu(qs_to_pri_map->qs_id));
	dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
		 qs_to_pri_map->priority);
	dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
		 qs_to_pri_map->link_vld);

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
		 le16_to_cpu(nq_to_qs_map->nq_id));
	dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
		 le16_to_cpu(nq_to_qs_map->qset_id));

	cmd = HCLGE_OPC_TM_PG_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
	dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);

	cmd = HCLGE_OPC_TM_QS_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
		 le16_to_cpu(qs_weight->qs_id));
	dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
	dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));

	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));

	hclge_dbg_dump_tm_pg(hdev);

	return;

err_tm_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
		cmd, ret);
}

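/* hclge_dbg_dump_tm_map: resolve and print the queue -> qset -> priority ->
 * TC mapping for the queue ID given in @cmd_buf (default 0); on DCB-capable
 * devices also print the per-group back-pressure qset bitmaps.
 */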
static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
				  const char *cmd_buf)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_tqp_tx_queue_tc_cmd *tc;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	u32 qset_mapping[32];
	int group_id, qset_id;
	int tc_id, pri_id, ret;
	u32 queue_id = 0;
	u32 i;

	ret = kstrtouint(cmd_buf, 0, &queue_id);
	queue_id = (ret != 0) ? 0 : queue_id;

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	pri_id = map->priority;

	cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	tc->queue_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	tc_id = tc->tc_id & 0x7;

	dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
	dev_info(&hdev->pdev->dev, "%04u     | %04d    | %02d     | %02d\n",
		 queue_id, qset_id, pri_id, tc_id);

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");
		return;
	}

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	for (group_id = 0; group_id < 32; group_id++) {
		hclge_cmd_setup_basic_desc(&desc, cmd, true);
		bp_to_qs_map_cmd->tc_id = tc_id;
		bp_to_qs_map_cmd->qs_group_id = group_id;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			goto err_tm_map_cmd_send;

		qset_mapping[group_id] =
			le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
	}

	dev_info(&hdev->pdev->dev, "index | tm bp qset mapping:\n");

	i = 0;
	for (group_id = 0; group_id < 4; group_id++) {
		dev_info(&hdev->pdev->dev,
			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
			 group_id * 256, qset_mapping[i + 7],
			 qset_mapping[i + 6], qset_mapping[i + 5],
			 qset_mapping[i + 4], qset_mapping[i + 3],
			 qset_mapping[i + 2], qset_mapping[i + 1],
			 qset_mapping[i]);
		i += 8;
	}

	return;

err_tm_map_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
		cmd, ret);
}

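/* hclge_dbg_dump_qos_pause_cfg: dump the MAC pause transmit gap and pause
 * transmit time configured in firmware.
 */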
static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pause cfg fail, ret = %d\n", ret);
		return;
	}

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
	dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
		 pause_param->pause_trans_gap);
	dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
		 le16_to_cpu(pause_param->pause_trans_time));
}

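/* hclge_dbg_dump_qos_pri_map: dump the VLAN-priority-to-TC mapping table. */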
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
{
	struct hclge_qos_pri_map_cmd *pri_map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pri map fail, ret = %d\n", ret);
		return;
	}

	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pri map\n");
	dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
	dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
	dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
	dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
	dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
	dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
	dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
	dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}

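/* hclge_dbg_dump_qos_buf_cfg: dump the TX/RX packet buffer allocation per TC,
 * the shared buffer size and common watermarks; on DCB-capable devices also
 * dump the per-TC private watermarks and common thresholds, each split
 * across two descriptors of HCLGE_TC_NUM_ONE_DESC TCs.
 */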
static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
{
	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
	struct hclge_rx_priv_wl_buf *rx_priv_wl;
	struct hclge_rx_com_wl *rx_packet_cnt;
	struct hclge_rx_com_thrd *rx_com_thrd;
	struct hclge_rx_com_wl *rx_com_wl;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc[2];
	int i, ret;

	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");

	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
			 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));

	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
			 le16_to_cpu(rx_buf_cmd->buf_num[i]));

	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
		 le16_to_cpu(rx_buf_cmd->shared_buf));

	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev, "\n");
	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
		 le16_to_cpu(rx_com_wl->com_wl.high),
		 le16_to_cpu(rx_com_wl->com_wl.low));

	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev,
		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
		 le16_to_cpu(rx_packet_cnt->com_wl.high),
		 le16_to_cpu(rx_packet_cnt->com_wl.low));
	dev_info(&hdev->pdev->dev, "\n");

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports rx priv wl\n");
		return;
	}

	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));

	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
	return;

err_qos_cmd_send:
	dev_err(&hdev->pdev->dev,
		"dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
}

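/* hclge_dbg_dump_mng_table: walk the manager MAC/ethertype table and print
 * every valid entry (resp_code non-zero) as one formatted row.
 */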
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
{
	struct hclge_mac_ethertype_idx_rd_cmd *req0;
	char printf_buf[HCLGE_DBG_BUF_LEN];
	struct hclge_desc desc;
	int ret, i;

	dev_info(&hdev->pdev->dev, "mng tab:\n");
	memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
	strncat(printf_buf,
		"entry|mac_addr         |mask|ether|mask|vlan|mask",
		HCLGE_DBG_BUF_LEN - 1);
	strncat(printf_buf,
		"|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
		HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);

	dev_info(&hdev->pdev->dev, "%s", printf_buf);

	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
					   true);
		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
		req0->index = cpu_to_le16(i);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"call hclge_cmd_send fail, ret = %d\n", ret);
			return;
		}

		if (!req0->resp_code)
			continue;

		memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
		snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
			 "%02u   |%02x:%02x:%02x:%02x:%02x:%02x|",
			 le16_to_cpu(req0->index),
			 req0->mac_addr[0], req0->mac_addr[1],
			 req0->mac_addr[2], req0->mac_addr[3],
			 req0->mac_addr[4], req0->mac_addr[5]);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x   |%04x |%x   |%04x|%x   |%02x   |%02x   |",
			 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
			 req0->ethter_type,
			 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
			 req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
			 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
			 req0->i_port_bitmap, req0->i_port_direction);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%d     |%d    |%02d   |%04d|%x\n",
			 !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
			 req0->egress_port & HCLGE_DBG_MNG_PF_ID,
			 (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
			 req0->egress_queue,
			 !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));

		dev_info(&hdev->pdev->dev, "%s", printf_buf);
	}
}

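/* hclge_dbg_fd_tcam_read: read one flow director TCAM entry (@loc) of the
 * given @stage and print its key. Keys are stored as x/y pairs; @sel_x
 * selects which half to read. The 13 data words span three descriptors.
 */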
static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
				   bool sel_x, u32 loc)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret, i;
	u32 *req;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage  = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	req1->index  = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		return;

	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
		 sel_x ? "x" : "y", loc);

	/* tcam_data0 ~ tcam_data1 */
	req = (u32 *)req1->tcam_data;
	for (i = 0; i < 2; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam_data2 ~ tcam_data7 */
	req = (u32 *)req2->tcam_data;
	for (i = 0; i < 6; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam_data8 ~ tcam_data12 */
	req = (u32 *)req3->tcam_data;
	for (i = 0; i < 5; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
}

static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
	u32 i;

	for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
		hclge_dbg_fd_tcam_read(hdev, 0, true, i);
		hclge_dbg_fd_tcam_read(hdev, 0, false, i);
	}
}

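/* hclge_dbg_dump_rst_info: dump the reset statistics counters plus the
 * current values of the reset-related interrupt and status registers.
 */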
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
		 hdev->rst_stats.pf_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
		 hdev->rst_stats.global_rst_cnt);
	dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
		 hdev->rst_stats.imp_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.reset_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_reset_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.reset_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.reset_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
	dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
	dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

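/* hclge_dbg_get_m7_stats_info: query how many descriptors the firmware (M7)
 * statistics occupy, fetch them in one chained command and print the raw
 * words, six per descriptor.
 */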
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
	struct hclge_desc *desc_src, *desc_tmp;
	struct hclge_get_m7_bd_cmd *req;
	struct hclge_desc desc;
	u32 bd_num, buf_len;
	int ret, i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);

	req = (struct hclge_get_m7_bd_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get firmware statistics bd number failed, ret = %d\n",
			ret);
		return;
	}

	bd_num = le32_to_cpu(req->bd_num);

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev,
			"allocate desc for get_m7_stats failed\n");
		return;
	}

	desc_tmp = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
				 HCLGE_OPC_M7_STATS_INFO);
	if (ret) {
		kfree(desc_src);
		dev_err(&hdev->pdev->dev,
			"get firmware statistics failed, ret = %d\n", ret);
		return;
	}

	for (i = 0; i < bd_num; i++) {
		dev_info(&hdev->pdev->dev, "0x%08x  0x%08x  0x%08x\n",
			 le32_to_cpu(desc_tmp->data[0]),
			 le32_to_cpu(desc_tmp->data[1]),
			 le32_to_cpu(desc_tmp->data[2]));
		dev_info(&hdev->pdev->dev, "0x%08x  0x%08x  0x%08x\n",
			 le32_to_cpu(desc_tmp->data[3]),
			 le32_to_cpu(desc_tmp->data[4]),
			 le32_to_cpu(desc_tmp->data[5]));

		desc_tmp++;
	}

	kfree(desc_src);
}

#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5

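/* hclge_ncl_config_data_print: print the payload of one NCL_CONFIG query.
 * data[0] of the first descriptor carries the offset/length request word
 * (written by hclge_dbg_cmd_send) rather than payload, so it is skipped;
 * every remaining word advances @offset and consumes @length until either
 * the descriptors or the requested length are exhausted.
 */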
static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
					struct hclge_desc *desc, int *offset,
					int *length)
{
#define HCLGE_CMD_DATA_NUM		6

	int i;
	int j;

	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
			if (i == 0 && j == 0)
				continue;

			dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
				 *offset,
				 le32_to_cpu(desc[i].data[j]));
			*offset += sizeof(u32);
			*length -= sizeof(u32);
			if (*length <= 0)
				return;
		}
	}
}

/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
 * @hdev: pointer to struct hclge_dev
 * @cmd_buf: string that contains offset and length
 */
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
				      const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET	4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH	(20 + 24 * 4)

	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
	int offset;
	int length;
	int data0;
	int ret;

	ret = sscanf(cmd_buf, "%x %x", &offset, &length);
	if (ret != 2 || offset < 0 || length <= 0) {
		dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
		return;
	}
	if (offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
	    length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
		dev_err(&hdev->pdev->dev, "Offset or length out of range.\n");
		return;
	}

	dev_info(&hdev->pdev->dev, "offset |    data\n");

	while (length > 0) {
		data0 = offset;
		if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
			data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
		else
			data0 |= length << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);
		if (ret)
			return;

		hclge_ncl_config_data_print(hdev, desc, &offset, &length);
	}
}

/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
 * @hdev: pointer to struct hclge_dev
 */
static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
{
#define HCLGE_BILLION_NANO_SECONDS 1000000000

	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;

	dev_info(&hdev->pdev->dev, "Recently generated mac tnl interrupts:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
		dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
			 (unsigned long)stats.time, rem_nsec / 1000,
			 stats.status);
	}
}

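/* hclge_dbg_dump_qs_shaper_single: read the shaping parameters of one qset
 * and print the decoded ir_b/ir_u/ir_s rate fields and bs_b/bs_s bucket
 * fields.
 */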
static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	u8 ir_u, ir_b, ir_s, bs_b, bs_s;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);

	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qsid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"qs%u failed to get tx_rate, ret=%d\n",
			qsid, ret);
		return;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	ir_b = hclge_tm_get_field(shapping_para, IR_B);
	ir_u = hclge_tm_get_field(shapping_para, IR_U);
	ir_s = hclge_tm_get_field(shapping_para, IR_S);
	bs_b = hclge_tm_get_field(shapping_para, BS_B);
	bs_s = hclge_tm_get_field(shapping_para, BS_S);

	dev_info(&hdev->pdev->dev,
		 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
		 qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
}

static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
{
	struct hnae3_knic_private_info *kinfo;
	struct hclge_vport *vport;
	int vport_id, i;

	for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
		vport = &hdev->vport[vport_id];
		kinfo = &vport->nic.kinfo;

		dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);

		for (i = 0; i < kinfo->num_tc; i++) {
			u16 qsid = vport->qs_offset + i;

			hclge_dbg_dump_qs_shaper_single(hdev, qsid);
		}
	}
}

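/* hclge_dbg_dump_qs_shaper: dump a single qset shaper when @cmd_buf parses
 * as a qset ID in [0, 1023], otherwise dump the shapers of every vport.
 */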
static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
				     const char *cmd_buf)
{
#define HCLGE_MAX_QSET_NUM 1024

	u16 qsid;
	int ret;

	ret = kstrtou16(cmd_buf, 0, &qsid);
	if (ret) {
		hclge_dbg_dump_qs_shaper_all(hdev);
		return;
	}

	if (qsid >= HCLGE_MAX_QSET_NUM) {
		dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
			qsid);
		return;
	}

	hclge_dbg_dump_qs_shaper_single(hdev, qsid);
}

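/* hclge_dbg_run_cmd: entry point for the debugfs command interface; matches
 * @cmd_buf against the known "dump ..." commands and dispatches. A typical
 * invocation (assumed path, which may vary by kernel version) is:
 *   echo "dump tm map 100" > /sys/kernel/debug/hns3/<pci-id>/cmd
 */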
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG	"dump reg"
#define DUMP_TM_MAP	"dump tm map"

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
		hclge_dbg_fd_tcam(hdev);
	} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
		hclge_dbg_dump_tc(hdev);
	} else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
		hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
	} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
		hclge_dbg_dump_tm(hdev);
	} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
		hclge_dbg_dump_qos_pause_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
		hclge_dbg_dump_qos_pri_map(hdev);
	} else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
		hclge_dbg_dump_qos_buf_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
		hclge_dbg_dump_mng_table(hdev);
	} else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
		hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
	} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
		hclge_dbg_dump_rst_info(hdev);
	} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
		hclge_dbg_get_m7_stats_info(hdev);
	} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
		hclge_dbg_dump_ncl_config(hdev,
					  &cmd_buf[sizeof("dump ncl_config")]);
	} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
		hclge_dbg_dump_mac_tnl_status(hdev);
	} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
		hclge_dbg_dump_qs_shaper(hdev,
					 &cmd_buf[sizeof("dump qs shaper")]);
	} else {
		dev_info(&hdev->pdev->dev, "unknown command\n");
		return -EINVAL;
	}

	return 0;
}