1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3
4 #include <linux/device.h>
5 #include <linux/sched/clock.h>
6
7 #include "hclge_debugfs.h"
8 #include "hclge_err.h"
9 #include "hclge_main.h"
10 #include "hclge_regs.h"
11 #include "hclge_tm.h"
12 #include "hnae3.h"
13
14 static const char * const state_str[] = { "off", "on" };
15 static const char * const hclge_mac_state_str[] = {
16 "TO_ADD", "TO_DEL", "ACTIVE"
17 };
18
19 static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
20
21 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
22 { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
23 .dfx_msg = &hclge_dbg_bios_common_reg[0],
24 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
25 .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
26 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
27 { .cmd = HNAE3_DBG_CMD_REG_SSU,
28 .dfx_msg = &hclge_dbg_ssu_reg_0[0],
29 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
30 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
31 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
32 { .cmd = HNAE3_DBG_CMD_REG_SSU,
33 .dfx_msg = &hclge_dbg_ssu_reg_1[0],
34 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
35 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
36 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
37 { .cmd = HNAE3_DBG_CMD_REG_SSU,
38 .dfx_msg = &hclge_dbg_ssu_reg_2[0],
39 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
40 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
41 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
42 { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
43 .dfx_msg = &hclge_dbg_igu_egu_reg[0],
44 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
45 .offset = HCLGE_DBG_DFX_IGU_OFFSET,
46 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
47 { .cmd = HNAE3_DBG_CMD_REG_RPU,
48 .dfx_msg = &hclge_dbg_rpu_reg_0[0],
49 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
50 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
51 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
52 { .cmd = HNAE3_DBG_CMD_REG_RPU,
53 .dfx_msg = &hclge_dbg_rpu_reg_1[0],
54 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
55 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
56 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
57 { .cmd = HNAE3_DBG_CMD_REG_NCSI,
58 .dfx_msg = &hclge_dbg_ncsi_reg[0],
59 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
60 .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
61 .cmd = HCLGE_OPC_DFX_NCSI_REG } },
62 { .cmd = HNAE3_DBG_CMD_REG_RTC,
63 .dfx_msg = &hclge_dbg_rtc_reg[0],
64 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
65 .offset = HCLGE_DBG_DFX_RTC_OFFSET,
66 .cmd = HCLGE_OPC_DFX_RTC_REG } },
67 { .cmd = HNAE3_DBG_CMD_REG_PPP,
68 .dfx_msg = &hclge_dbg_ppp_reg[0],
69 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
70 .offset = HCLGE_DBG_DFX_PPP_OFFSET,
71 .cmd = HCLGE_OPC_DFX_PPP_REG } },
72 { .cmd = HNAE3_DBG_CMD_REG_RCB,
73 .dfx_msg = &hclge_dbg_rcb_reg[0],
74 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
75 .offset = HCLGE_DBG_DFX_RCB_OFFSET,
76 .cmd = HCLGE_OPC_DFX_RCB_REG } },
77 { .cmd = HNAE3_DBG_CMD_REG_TQP,
78 .dfx_msg = &hclge_dbg_tqp_reg[0],
79 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
80 .offset = HCLGE_DBG_DFX_TQP_OFFSET,
81 .cmd = HCLGE_OPC_DFX_TQP_REG } },
82 };
83
84 /* Make sure that len(name) + interval >= maxlen(item data) + 2.
85 * For example, if name = "pkt_num" (len: 7) and the item data is a u32
86 * printed as "%u" (maxlen: 10), then the interval should be at least 5.
87 */
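/* Worked illustration (not part of the original comment): an item defined as
 * { "pkt_num", 5 } reserves 7 + 5 = 12 characters for its column, enough for
 * the widest u32 value "4294967295" (10 chars) plus at least two trailing
 * spaces before the next column starts.
 */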
88 static void hclge_dbg_fill_content(char *content, u16 len,
89 const struct hclge_dbg_item *items,
90 const char **result, u16 size)
91 {
92 #define HCLGE_DBG_LINE_END_LEN 2
93 char *pos = content;
94 u16 item_len;
95 u16 i;
96
97 if (!len) {
98 return;
99 } else if (len <= HCLGE_DBG_LINE_END_LEN) {
100 *pos++ = '\0';
101 return;
102 }
103
104 memset(content, ' ', len);
105 len -= HCLGE_DBG_LINE_END_LEN;
106
107 for (i = 0; i < size; i++) {
108 item_len = strlen(items[i].name) + items[i].interval;
109 if (len < item_len)
110 break;
111
112 if (result) {
113 if (item_len < strlen(result[i]))
114 break;
115 memcpy(pos, result[i], strlen(result[i]));
116 } else {
117 memcpy(pos, items[i].name, strlen(items[i].name));
118 }
119 pos += item_len;
120 len -= item_len;
121 }
122 *pos++ = '\n';
123 *pos++ = '\0';
124 }
125
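/* id 0 denotes the PF itself; a non-zero id is printed as "vf<id - 1>". */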
126 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
127 {
128 if (id)
129 sprintf(buf, "vf%u", id - 1U);
130 else
131 sprintf(buf, "pf");
132
133 return buf;
134 }
135
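/* Query the DFX BD-number descriptors and return the BD count stored at the
 * requested register-group offset; a count of zero is treated as an error.
 */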
136 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
137 u32 *bd_num)
138 {
139 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
140 int entries_per_desc;
141 int index;
142 int ret;
143
144 ret = hclge_query_bd_num_cmd_send(hdev, desc);
145 if (ret) {
146 dev_err(&hdev->pdev->dev,
147 "failed to get dfx bd_num, offset = %d, ret = %d\n",
148 offset, ret);
149 return ret;
150 }
151
152 entries_per_desc = ARRAY_SIZE(desc[0].data);
153 index = offset % entries_per_desc;
154
155 *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
156 if (!(*bd_num)) {
157 dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
158 return -EINVAL;
159 }
160
161 return 0;
162 }
163
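/* Build a chain of bd_num descriptors for the given opcode, link them with
 * the NEXT flag, put the query index in data[0] of the first BD and send the
 * command in one shot.
 */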
164 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
165 struct hclge_desc *desc_src,
166 int index, int bd_num,
167 enum hclge_opcode_type cmd)
168 {
169 struct hclge_desc *desc = desc_src;
170 int ret, i;
171
172 hclge_cmd_setup_basic_desc(desc, cmd, true);
173 desc->data[0] = cpu_to_le32(index);
174
175 for (i = 1; i < bd_num; i++) {
176 desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
177 desc++;
178 hclge_cmd_setup_basic_desc(desc, cmd, true);
179 }
180
181 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
182 if (ret)
183 dev_err(&hdev->pdev->dev,
184 "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
185 return ret;
186 }
187
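/* TQP registers are dumped per queue: the item legend is printed first, then
 * one command is issued per TQP index and its data words are printed as one
 * row. hclge_dbg_dump_reg_common() below issues a single query instead.
 */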
188 static int
189 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
190 const struct hclge_dbg_reg_type_info *reg_info,
191 char *buf, int len, int *pos)
192 {
193 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
194 const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
195 struct hclge_desc *desc_src;
196 u32 index, entry, i, cnt;
197 int bd_num, min_num, ret;
198 struct hclge_desc *desc;
199
200 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
201 if (ret)
202 return ret;
203
204 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
205 if (!desc_src)
206 return -ENOMEM;
207
208 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
209
210 for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
211 *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
212 cnt++, dfx_message->message);
213
214 for (i = 0; i < cnt; i++)
215 *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
216
217 *pos += scnprintf(buf + *pos, len - *pos, "\n");
218
219 for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
220 dfx_message = reg_info->dfx_msg;
221 desc = desc_src;
222 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
223 reg_msg->cmd);
224 if (ret)
225 break;
226
227 for (i = 0; i < min_num; i++, dfx_message++) {
228 entry = i % HCLGE_DESC_DATA_LEN;
229 if (i > 0 && !entry)
230 desc++;
231
232 *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
233 le32_to_cpu(desc->data[entry]));
234 }
235 *pos += scnprintf(buf + *pos, len - *pos, "\n");
236 }
237
238 kfree(desc_src);
239 return ret;
240 }
241
242 static int
243 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
244 const struct hclge_dbg_reg_type_info *reg_info,
245 char *buf, int len, int *pos)
246 {
247 const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
248 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
249 struct hclge_desc *desc_src;
250 int bd_num, min_num, ret;
251 struct hclge_desc *desc;
252 u32 entry, i;
253
254 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
255 if (ret)
256 return ret;
257
258 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
259 if (!desc_src)
260 return -ENOMEM;
261
262 desc = desc_src;
263
264 ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
265 if (ret) {
266 kfree(desc_src);
267 return ret;
268 }
269
270 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
271
272 for (i = 0; i < min_num; i++, dfx_message++) {
273 entry = i % HCLGE_DESC_DATA_LEN;
274 if (i > 0 && !entry)
275 desc++;
276 if (!dfx_message->flag)
277 continue;
278
279 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
280 dfx_message->message,
281 le32_to_cpu(desc->data[entry]));
282 }
283
284 kfree(desc_src);
285 return 0;
286 }
287
288 static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
289 {HCLGE_MAC_TX_EN_B, "mac_trans_en"},
290 {HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
291 {HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
292 {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
293 {HCLGE_MAC_1588_TX_B, "1588_trans_en"},
294 {HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
295 {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
296 {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
297 {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
298 {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
299 {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
300 {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
301 {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
302 {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
303 };
304
305 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
306 int len, int *pos)
307 {
308 struct hclge_config_mac_mode_cmd *req;
309 struct hclge_desc desc;
310 u32 loop_en, i, offset;
311 int ret;
312
313 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
314
315 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
316 if (ret) {
317 dev_err(&hdev->pdev->dev,
318 "failed to dump mac enable status, ret = %d\n", ret);
319 return ret;
320 }
321
322 req = (struct hclge_config_mac_mode_cmd *)desc.data;
323 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
324
325 for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
326 offset = hclge_dbg_mac_en_status[i].offset;
327 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
328 hclge_dbg_mac_en_status[i].message,
329 hnae3_get_bit(loop_en, offset));
330 }
331
332 return 0;
333 }
334
335 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
336 int len, int *pos)
337 {
338 struct hclge_config_max_frm_size_cmd *req;
339 struct hclge_desc desc;
340 int ret;
341
342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
343
344 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
345 if (ret) {
346 dev_err(&hdev->pdev->dev,
347 "failed to dump mac frame size, ret = %d\n", ret);
348 return ret;
349 }
350
351 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
352
353 *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
354 le16_to_cpu(req->max_frm_size));
355 *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
356 req->min_frm_size);
357
358 return 0;
359 }
360
361 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
362 int len, int *pos)
363 {
364 #define HCLGE_MAC_SPEED_SHIFT 0
365 #define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
366 #define HCLGE_MAC_DUPLEX_SHIFT 7
367
368 struct hclge_config_mac_speed_dup_cmd *req;
369 struct hclge_desc desc;
370 int ret;
371
372 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
373
374 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
375 if (ret) {
376 dev_err(&hdev->pdev->dev,
377 "failed to dump mac speed duplex, ret = %d\n", ret);
378 return ret;
379 }
380
381 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
382
383 *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
384 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
385 HCLGE_MAC_SPEED_SHIFT));
386 *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
387 hnae3_get_bit(req->speed_dup,
388 HCLGE_MAC_DUPLEX_SHIFT));
389 return 0;
390 }
391
392 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
393 {
394 int pos = 0;
395 int ret;
396
397 ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
398 if (ret)
399 return ret;
400
401 ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
402 if (ret)
403 return ret;
404
405 return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
406 }
407
408 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
409 int *pos)
410 {
411 struct hclge_dbg_bitmap_cmd req;
412 struct hclge_desc desc;
413 u16 qset_id, qset_num;
414 int ret;
415
416 ret = hclge_tm_get_qset_num(hdev, &qset_num);
417 if (ret)
418 return ret;
419
420 *pos += scnprintf(buf + *pos, len - *pos,
421 "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
422 for (qset_id = 0; qset_id < qset_num; qset_id++) {
423 ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
424 HCLGE_OPC_QSET_DFX_STS);
425 if (ret)
426 return ret;
427
428 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
429
430 *pos += scnprintf(buf + *pos, len - *pos,
431 "%04u %#x %#x %#x %#x\n",
432 qset_id, req.bit0, req.bit1, req.bit2,
433 req.bit3);
434 }
435
436 return 0;
437 }
438
439 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
440 int *pos)
441 {
442 struct hclge_dbg_bitmap_cmd req;
443 struct hclge_desc desc;
444 u8 pri_id, pri_num;
445 int ret;
446
447 ret = hclge_tm_get_pri_num(hdev, &pri_num);
448 if (ret)
449 return ret;
450
451 *pos += scnprintf(buf + *pos, len - *pos,
452 "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
453 for (pri_id = 0; pri_id < pri_num; pri_id++) {
454 ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
455 HCLGE_OPC_PRI_DFX_STS);
456 if (ret)
457 return ret;
458
459 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
460
461 *pos += scnprintf(buf + *pos, len - *pos,
462 "%03u %#x %#x %#x\n",
463 pri_id, req.bit0, req.bit1, req.bit2);
464 }
465
466 return 0;
467 }
468
469 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
470 int *pos)
471 {
472 struct hclge_dbg_bitmap_cmd req;
473 struct hclge_desc desc;
474 u8 pg_id;
475 int ret;
476
477 *pos += scnprintf(buf + *pos, len - *pos,
478 "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
479 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
480 ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
481 HCLGE_OPC_PG_DFX_STS);
482 if (ret)
483 return ret;
484
485 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
486
487 *pos += scnprintf(buf + *pos, len - *pos,
488 "%03u %#x %#x %#x\n",
489 pg_id, req.bit0, req.bit1, req.bit2);
490 }
491
492 return 0;
493 }
494
495 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
496 int *pos)
497 {
498 struct hclge_desc desc;
499 u16 nq_id;
500 int ret;
501
502 *pos += scnprintf(buf + *pos, len - *pos,
503 "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
504 for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
505 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
506 HCLGE_OPC_SCH_NQ_CNT);
507 if (ret)
508 return ret;
509
510 *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
511 nq_id, le32_to_cpu(desc.data[1]));
512
513 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
514 HCLGE_OPC_SCH_RQ_CNT);
515 if (ret)
516 return ret;
517
518 *pos += scnprintf(buf + *pos, len - *pos,
519 " %#x\n",
520 le32_to_cpu(desc.data[1]));
521 }
522
523 return 0;
524 }
525
526 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
527 int *pos)
528 {
529 struct hclge_dbg_bitmap_cmd req;
530 struct hclge_desc desc;
531 u8 port_id = 0;
532 int ret;
533
534 ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
535 HCLGE_OPC_PORT_DFX_STS);
536 if (ret)
537 return ret;
538
539 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
540
541 *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
542 req.bit0);
543 *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
544 req.bit1);
545
546 return 0;
547 }
548
549 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
550 int *pos)
551 {
552 struct hclge_desc desc[2];
553 u8 port_id = 0;
554 int ret;
555
556 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
557 HCLGE_OPC_TM_INTERNAL_CNT);
558 if (ret)
559 return ret;
560
561 *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
562 le32_to_cpu(desc[0].data[1]));
563 *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
564 le32_to_cpu(desc[0].data[2]));
565
566 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
567 HCLGE_OPC_TM_INTERNAL_STS);
568 if (ret)
569 return ret;
570
571 *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
572 le32_to_cpu(desc[0].data[1]));
573 *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
574 le32_to_cpu(desc[0].data[2]));
575 *pos += scnprintf(buf + *pos, len - *pos,
576 "sch_roce_fifo_afull_gap: %#x\n",
577 le32_to_cpu(desc[0].data[3]));
578 *pos += scnprintf(buf + *pos, len - *pos,
579 "tx_private_waterline: %#x\n",
580 le32_to_cpu(desc[0].data[4]));
581 *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
582 le32_to_cpu(desc[0].data[5]));
583 *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
584 le32_to_cpu(desc[1].data[0]));
585 *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
586 le32_to_cpu(desc[1].data[1]));
587
588 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
589 return 0;
590
591 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
592 HCLGE_OPC_TM_INTERNAL_STS_1);
593 if (ret)
594 return ret;
595
596 *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
597 le32_to_cpu(desc[0].data[1]));
598 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
599 le32_to_cpu(desc[0].data[2]));
600 *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
601 le32_to_cpu(desc[0].data[3]));
602 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
603 le32_to_cpu(desc[0].data[4]));
604 *pos += scnprintf(buf + *pos, len - *pos,
605 "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
606 le32_to_cpu(desc[0].data[5]));
607
608 return 0;
609 }
610
611 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
612 {
613 int pos = 0;
614 int ret;
615
616 ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
617 if (ret)
618 return ret;
619
620 ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
621 if (ret)
622 return ret;
623
624 ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
625 if (ret)
626 return ret;
627
628 ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
629 if (ret)
630 return ret;
631
632 ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
633 if (ret)
634 return ret;
635
636 return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
637 }
638
639 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
640 enum hnae3_dbg_cmd cmd, char *buf, int len)
641 {
642 const struct hclge_dbg_reg_type_info *reg_info;
643 int pos = 0, ret = 0;
644 int i;
645
646 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
647 reg_info = &hclge_dbg_reg_info[i];
648 if (cmd == reg_info->cmd) {
649 if (cmd == HNAE3_DBG_CMD_REG_TQP)
650 return hclge_dbg_dump_reg_tqp(hdev, reg_info,
651 buf, len, &pos);
652
653 ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
654 len, &pos);
655 if (ret)
656 break;
657 }
658 }
659
660 return ret;
661 }
662
663 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
664 {
665 struct hclge_ets_tc_weight_cmd *ets_weight;
666 struct hclge_desc desc;
667 char *sch_mode_str;
668 int pos = 0;
669 int ret;
670 u8 i;
671
672 if (!hnae3_dev_dcb_supported(hdev)) {
673 dev_err(&hdev->pdev->dev,
674 "Only DCB-supported dev supports tc\n");
675 return -EOPNOTSUPP;
676 }
677
678 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
679 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
680 if (ret) {
681 dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
682 ret);
683 return ret;
684 }
685
686 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
687
688 pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
689 hdev->tm_info.num_tc);
690 pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
691 ets_weight->weight_offset);
692
693 pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
694 for (i = 0; i < HNAE3_MAX_TC; i++) {
695 sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
696 pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
697 i, sch_mode_str, ets_weight->tc_weight[i]);
698 }
699
700 return 0;
701 }
702
703 static const struct hclge_dbg_item tm_pg_items[] = {
704 { "ID", 2 },
705 { "PRI_MAP", 2 },
706 { "MODE", 2 },
707 { "DWRR", 2 },
708 { "C_IR_B", 2 },
709 { "C_IR_U", 2 },
710 { "C_IR_S", 2 },
711 { "C_BS_B", 2 },
712 { "C_BS_S", 2 },
713 { "C_FLAG", 2 },
714 { "C_RATE(Mbps)", 2 },
715 { "P_IR_B", 2 },
716 { "P_IR_U", 2 },
717 { "P_IR_S", 2 },
718 { "P_BS_B", 2 },
719 { "P_BS_S", 2 },
720 { "P_FLAG", 2 },
721 { "P_RATE(Mbps)", 0 }
722 };
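/* Each { name, interval } pair above describes one column of the "tm pg"
 * dump. The header row comes from hclge_dbg_fill_content() with a NULL
 * result array; data rows are formatted into data_str and passed back
 * through the same helper.
 */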
723
724 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
725 char **result, u8 *index)
726 {
727 sprintf(result[(*index)++], "%3u", para->ir_b);
728 sprintf(result[(*index)++], "%3u", para->ir_u);
729 sprintf(result[(*index)++], "%3u", para->ir_s);
730 sprintf(result[(*index)++], "%3u", para->bs_b);
731 sprintf(result[(*index)++], "%3u", para->bs_s);
732 sprintf(result[(*index)++], "%3u", para->flag);
733 sprintf(result[(*index)++], "%6u", para->rate);
734 }
735
736 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
737 char *buf, int len)
738 {
739 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
740 char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
741 u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
742 char content[HCLGE_DBG_TM_INFO_LEN];
743 int pos = 0;
744 int ret;
745
746 for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
747 result[i] = data_str;
748 data_str += HCLGE_DBG_DATA_STR_LEN;
749 }
750
751 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
752 NULL, ARRAY_SIZE(tm_pg_items));
753 pos += scnprintf(buf + pos, len - pos, "%s", content);
754
755 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
756 ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
757 if (ret)
758 return ret;
759
760 ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
761 if (ret)
762 return ret;
763
764 ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
765 if (ret)
766 return ret;
767
768 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
769 HCLGE_OPC_TM_PG_C_SHAPPING,
770 &c_shaper_para);
771 if (ret)
772 return ret;
773
774 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
775 HCLGE_OPC_TM_PG_P_SHAPPING,
776 &p_shaper_para);
777 if (ret)
778 return ret;
779
780 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
781 "sp";
782
783 j = 0;
784 sprintf(result[j++], "%02u", pg_id);
785 sprintf(result[j++], "0x%02x", pri_bit_map);
786 sprintf(result[j++], "%4s", sch_mode_str);
787 sprintf(result[j++], "%3u", weight);
788 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
789 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
790
791 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
792 (const char **)result,
793 ARRAY_SIZE(tm_pg_items));
794 pos += scnprintf(buf + pos, len - pos, "%s", content);
795 }
796
797 return 0;
798 }
799
800 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
801 {
802 char *data_str;
803 int ret;
804
805 data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
806 HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
807 if (!data_str)
808 return -ENOMEM;
809
810 ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
811
812 kfree(data_str);
813
814 return ret;
815 }
816
817 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
818 {
819 struct hclge_tm_shaper_para shaper_para;
820 int pos = 0;
821 int ret;
822
823 ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
824 if (ret)
825 return ret;
826
827 pos += scnprintf(buf + pos, len - pos,
828 "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
829 pos += scnprintf(buf + pos, len - pos,
830 "%3u %3u %3u %3u %3u %1u %6u\n",
831 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
832 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
833 shaper_para.rate);
834
835 return 0;
836 }
837
838 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
839 char *buf, int len)
840 {
841 u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
842 struct hclge_bp_to_qs_map_cmd *map;
843 struct hclge_desc desc;
844 int pos = 0;
845 u8 group_id;
846 u8 grp_num;
847 u16 i = 0;
848 int ret;
849
850 grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
851 HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
852 map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
853 for (group_id = 0; group_id < grp_num; group_id++) {
854 hclge_cmd_setup_basic_desc(&desc,
855 HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
856 true);
857 map->tc_id = tc_id;
858 map->qs_group_id = group_id;
859 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860 if (ret) {
861 dev_err(&hdev->pdev->dev,
862 "failed to get bp to qset map, ret = %d\n",
863 ret);
864 return ret;
865 }
866
867 qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
868 }
869
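/* Each 32-bit group covers 32 qsets and eight groups are printed per row,
 * so every printed row starts at qset index group_id * 256.
 */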
870 pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
871 for (group_id = 0; group_id < grp_num / 8; group_id++) {
872 pos += scnprintf(buf + pos, len - pos,
873 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
874 group_id * 256, qset_mapping[i + 7],
875 qset_mapping[i + 6], qset_mapping[i + 5],
876 qset_mapping[i + 4], qset_mapping[i + 3],
877 qset_mapping[i + 2], qset_mapping[i + 1],
878 qset_mapping[i]);
879 i += 8;
880 }
881
882 return pos;
883 }
884
885 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
886 {
887 u16 queue_id;
888 u16 qset_id;
889 u8 link_vld;
890 int pos = 0;
891 u8 pri_id;
892 u8 tc_id;
893 int ret;
894
895 for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
896 ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
897 if (ret)
898 return ret;
899
900 ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
901 &link_vld);
902 if (ret)
903 return ret;
904
905 ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
906 if (ret)
907 return ret;
908
909 pos += scnprintf(buf + pos, len - pos,
910 "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
911 pos += scnprintf(buf + pos, len - pos,
912 "%04u %4u %3u %2u\n",
913 queue_id, qset_id, pri_id, tc_id);
914
915 if (!hnae3_dev_dcb_supported(hdev))
916 continue;
917
918 ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
919 len - pos);
920 if (ret < 0)
921 return ret;
922 pos += ret;
923
924 pos += scnprintf(buf + pos, len - pos, "\n");
925 }
926
927 return 0;
928 }
929
930 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
931 {
932 struct hclge_tm_nodes_cmd *nodes;
933 struct hclge_desc desc;
934 int pos = 0;
935 int ret;
936
937 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
938 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
939 if (ret) {
940 dev_err(&hdev->pdev->dev,
941 "failed to dump tm nodes, ret = %d\n", ret);
942 return ret;
943 }
944
945 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
946
947 pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
948 pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
949 nodes->pg_base_id, nodes->pg_num);
950 pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
951 nodes->pri_base_id, nodes->pri_num);
952 pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
953 le16_to_cpu(nodes->qset_base_id),
954 le16_to_cpu(nodes->qset_num));
955 pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
956 le16_to_cpu(nodes->queue_base_id),
957 le16_to_cpu(nodes->queue_num));
958
959 return 0;
960 }
961
962 static const struct hclge_dbg_item tm_pri_items[] = {
963 { "ID", 4 },
964 { "MODE", 2 },
965 { "DWRR", 2 },
966 { "C_IR_B", 2 },
967 { "C_IR_U", 2 },
968 { "C_IR_S", 2 },
969 { "C_BS_B", 2 },
970 { "C_BS_S", 2 },
971 { "C_FLAG", 2 },
972 { "C_RATE(Mbps)", 2 },
973 { "P_IR_B", 2 },
974 { "P_IR_U", 2 },
975 { "P_IR_S", 2 },
976 { "P_BS_B", 2 },
977 { "P_BS_S", 2 },
978 { "P_FLAG", 2 },
979 { "P_RATE(Mbps)", 0 }
980 };
981
982 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
983 {
984 char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
985 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
986 char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
987 char content[HCLGE_DBG_TM_INFO_LEN];
988 u8 pri_num, sch_mode, weight, i, j;
989 int pos, ret;
990
991 ret = hclge_tm_get_pri_num(hdev, &pri_num);
992 if (ret)
993 return ret;
994
995 for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
996 result[i] = &data_str[i][0];
997
998 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
999 NULL, ARRAY_SIZE(tm_pri_items));
1000 pos = scnprintf(buf, len, "%s", content);
1001
1002 for (i = 0; i < pri_num; i++) {
1003 ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
1004 if (ret)
1005 return ret;
1006
1007 ret = hclge_tm_get_pri_weight(hdev, i, &weight);
1008 if (ret)
1009 return ret;
1010
1011 ret = hclge_tm_get_pri_shaper(hdev, i,
1012 HCLGE_OPC_TM_PRI_C_SHAPPING,
1013 &c_shaper_para);
1014 if (ret)
1015 return ret;
1016
1017 ret = hclge_tm_get_pri_shaper(hdev, i,
1018 HCLGE_OPC_TM_PRI_P_SHAPPING,
1019 &p_shaper_para);
1020 if (ret)
1021 return ret;
1022
1023 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1024 "sp";
1025
1026 j = 0;
1027 sprintf(result[j++], "%04u", i);
1028 sprintf(result[j++], "%4s", sch_mode_str);
1029 sprintf(result[j++], "%3u", weight);
1030 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1031 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1032 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1033 (const char **)result,
1034 ARRAY_SIZE(tm_pri_items));
1035 pos += scnprintf(buf + pos, len - pos, "%s", content);
1036 }
1037
1038 return 0;
1039 }
1040
1041 static const struct hclge_dbg_item tm_qset_items[] = {
1042 { "ID", 4 },
1043 { "MAP_PRI", 2 },
1044 { "LINK_VLD", 2 },
1045 { "MODE", 2 },
1046 { "DWRR", 2 },
1047 { "IR_B", 2 },
1048 { "IR_U", 2 },
1049 { "IR_S", 2 },
1050 { "BS_B", 2 },
1051 { "BS_S", 2 },
1052 { "FLAG", 2 },
1053 { "RATE(Mbps)", 0 }
1054 };
1055
1056 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1057 {
1058 char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1059 char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1060 u8 priority, link_vld, sch_mode, weight;
1061 struct hclge_tm_shaper_para shaper_para;
1062 char content[HCLGE_DBG_TM_INFO_LEN];
1063 u16 qset_num, i;
1064 int ret, pos;
1065 u8 j;
1066
1067 ret = hclge_tm_get_qset_num(hdev, &qset_num);
1068 if (ret)
1069 return ret;
1070
1071 for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1072 result[i] = &data_str[i][0];
1073
1074 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1075 NULL, ARRAY_SIZE(tm_qset_items));
1076 pos = scnprintf(buf, len, "%s", content);
1077
1078 for (i = 0; i < qset_num; i++) {
1079 ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1080 if (ret)
1081 return ret;
1082
1083 ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1084 if (ret)
1085 return ret;
1086
1087 ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1088 if (ret)
1089 return ret;
1090
1091 ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1092 if (ret)
1093 return ret;
1094
1095 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1096 "sp";
1097
1098 j = 0;
1099 sprintf(result[j++], "%04u", i);
1100 sprintf(result[j++], "%4u", priority);
1101 sprintf(result[j++], "%4u", link_vld);
1102 sprintf(result[j++], "%4s", sch_mode_str);
1103 sprintf(result[j++], "%3u", weight);
1104 hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1105
1106 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1107 (const char **)result,
1108 ARRAY_SIZE(tm_qset_items));
1109 pos += scnprintf(buf + pos, len - pos, "%s", content);
1110 }
1111
1112 return 0;
1113 }
1114
1115 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1116 int len)
1117 {
1118 struct hclge_cfg_pause_param_cmd *pause_param;
1119 struct hclge_desc desc;
1120 int pos = 0;
1121 int ret;
1122
1123 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1124 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1125 if (ret) {
1126 dev_err(&hdev->pdev->dev,
1127 "failed to dump qos pause, ret = %d\n", ret);
1128 return ret;
1129 }
1130
1131 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1132
1133 pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1134 pause_param->pause_trans_gap);
1135 pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1136 le16_to_cpu(pause_param->pause_trans_time));
1137 return 0;
1138 }
1139
1140 #define HCLGE_DBG_TC_MASK 0x0F
1141
1142 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1143 int len)
1144 {
1145 #define HCLGE_DBG_TC_BIT_WIDTH 4
1146
1147 struct hclge_qos_pri_map_cmd *pri_map;
1148 struct hclge_desc desc;
1149 int pos = 0;
1150 u8 *pri_tc;
1151 u8 tc, i;
1152 int ret;
1153
1154 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1155 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1156 if (ret) {
1157 dev_err(&hdev->pdev->dev,
1158 "failed to dump qos pri map, ret = %d\n", ret);
1159 return ret;
1160 }
1161
1162 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1163
1164 pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1165 pri_map->vlan_pri);
1166 pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
1167
1168 pri_tc = (u8 *)pri_map;
1169 for (i = 0; i < HNAE3_MAX_TC; i++) {
1170 tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1171 tc &= HCLGE_DBG_TC_MASK;
1172 pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
1173 }
1174
1175 return 0;
1176 }
1177
1178 static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
1179 int len)
1180 {
1181 struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
1182 struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
1183 u8 *req0 = (u8 *)desc[0].data;
1184 u8 *req1 = (u8 *)desc[1].data;
1185 u8 dscp_tc[HNAE3_MAX_DSCP];
1186 int pos, ret;
1187 u8 i, j;
1188
1189 pos = scnprintf(buf, len, "tc map mode: %s\n",
1190 tc_map_mode_str[kinfo->tc_map_mode]);
1191
1192 if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
1193 return 0;
1194
1195 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
1196 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1197 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
1198 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
1199 if (ret) {
1200 dev_err(&hdev->pdev->dev,
1201 "failed to dump qos dscp map, ret = %d\n", ret);
1202 return ret;
1203 }
1204
1205 pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
1206
1207 /* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
1208 for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
1209 j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
1210 /* Each dscp setting has 4 bits, so each byte holds two dscp
1211 * settings
1212 */
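/* For illustration, assuming HCLGE_DSCP_TC_SHIFT(n) expands to ((n) & 1) * 4:
 * dscp 5 is the high nibble of req0[2] and dscp 37 the high nibble of req1[2].
 */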
1213 dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1214 dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1215 dscp_tc[i] &= HCLGE_DBG_TC_MASK;
1216 dscp_tc[j] &= HCLGE_DBG_TC_MASK;
1217 }
1218
1219 for (i = 0; i < HNAE3_MAX_DSCP; i++) {
1220 if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
1221 continue;
1222
1223 pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
1224 i, kinfo->dscp_prio[i], dscp_tc[i]);
1225 }
1226
1227 return 0;
1228 }
1229
1230 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1231 {
1232 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1233 struct hclge_desc desc;
1234 int pos = 0;
1235 int i, ret;
1236
1237 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1238 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1239 if (ret) {
1240 dev_err(&hdev->pdev->dev,
1241 "failed to dump tx buf, ret = %d\n", ret);
1242 return ret;
1243 }
1244
1245 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1246 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1247 pos += scnprintf(buf + pos, len - pos,
1248 "tx_packet_buf_tc_%d: 0x%x\n", i,
1249 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1250
1251 return pos;
1252 }
1253
1254 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1255 int len)
1256 {
1257 struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1258 struct hclge_desc desc;
1259 int pos = 0;
1260 int i, ret;
1261
1262 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1263 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1264 if (ret) {
1265 dev_err(&hdev->pdev->dev,
1266 "failed to dump rx priv buf, ret = %d\n", ret);
1267 return ret;
1268 }
1269
1270 pos += scnprintf(buf + pos, len - pos, "\n");
1271
1272 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1273 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1274 pos += scnprintf(buf + pos, len - pos,
1275 "rx_packet_buf_tc_%d: 0x%x\n", i,
1276 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1277
1278 pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1279 le16_to_cpu(rx_buf_cmd->shared_buf));
1280
1281 return pos;
1282 }
1283
1284 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1285 int len)
1286 {
1287 struct hclge_rx_com_wl *rx_com_wl;
1288 struct hclge_desc desc;
1289 int pos = 0;
1290 int ret;
1291
1292 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1293 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1294 if (ret) {
1295 dev_err(&hdev->pdev->dev,
1296 "failed to dump rx common wl, ret = %d\n", ret);
1297 return ret;
1298 }
1299
1300 rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1301 pos += scnprintf(buf + pos, len - pos, "\n");
1302 pos += scnprintf(buf + pos, len - pos,
1303 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1304 le16_to_cpu(rx_com_wl->com_wl.high),
1305 le16_to_cpu(rx_com_wl->com_wl.low));
1306
1307 return pos;
1308 }
1309
1310 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1311 int len)
1312 {
1313 struct hclge_rx_com_wl *rx_packet_cnt;
1314 struct hclge_desc desc;
1315 int pos = 0;
1316 int ret;
1317
1318 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1319 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1320 if (ret) {
1321 dev_err(&hdev->pdev->dev,
1322 "failed to dump rx global pkt cnt, ret = %d\n", ret);
1323 return ret;
1324 }
1325
1326 rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1327 pos += scnprintf(buf + pos, len - pos,
1328 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1329 le16_to_cpu(rx_packet_cnt->com_wl.high),
1330 le16_to_cpu(rx_packet_cnt->com_wl.low));
1331
1332 return pos;
1333 }
1334
1335 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1336 int len)
1337 {
1338 struct hclge_rx_priv_wl_buf *rx_priv_wl;
1339 struct hclge_desc desc[2];
1340 int pos = 0;
1341 int i, ret;
1342
1343 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1344 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1345 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1346 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1347 if (ret) {
1348 dev_err(&hdev->pdev->dev,
1349 "failed to dump rx priv wl buf, ret = %d\n", ret);
1350 return ret;
1351 }
1352
1353 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1354 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1355 pos += scnprintf(buf + pos, len - pos,
1356 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1357 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1358 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1359
1360 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1361 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1362 pos += scnprintf(buf + pos, len - pos,
1363 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1364 i + HCLGE_TC_NUM_ONE_DESC,
1365 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1366 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1367
1368 return pos;
1369 }
1370
1371 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1372 char *buf, int len)
1373 {
1374 struct hclge_rx_com_thrd *rx_com_thrd;
1375 struct hclge_desc desc[2];
1376 int pos = 0;
1377 int i, ret;
1378
1379 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1380 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1381 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1382 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1383 if (ret) {
1384 dev_err(&hdev->pdev->dev,
1385 "failed to dump rx common threshold, ret = %d\n", ret);
1386 return ret;
1387 }
1388
1389 pos += scnprintf(buf + pos, len - pos, "\n");
1390 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1391 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1392 pos += scnprintf(buf + pos, len - pos,
1393 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1394 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1395 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1396
1397 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1398 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1399 pos += scnprintf(buf + pos, len - pos,
1400 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1401 i + HCLGE_TC_NUM_ONE_DESC,
1402 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1403 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1404
1405 return pos;
1406 }
1407
1408 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1409 int len)
1410 {
1411 int pos = 0;
1412 int ret;
1413
1414 ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1415 if (ret < 0)
1416 return ret;
1417 pos += ret;
1418
1419 ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1420 if (ret < 0)
1421 return ret;
1422 pos += ret;
1423
1424 ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1425 if (ret < 0)
1426 return ret;
1427 pos += ret;
1428
1429 ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1430 if (ret < 0)
1431 return ret;
1432 pos += ret;
1433
1434 pos += scnprintf(buf + pos, len - pos, "\n");
1435 if (!hnae3_dev_dcb_supported(hdev))
1436 return 0;
1437
1438 ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1439 if (ret < 0)
1440 return ret;
1441 pos += ret;
1442
1443 ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1444 len - pos);
1445 if (ret < 0)
1446 return ret;
1447
1448 return 0;
1449 }
1450
1451 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1452 {
1453 struct hclge_mac_ethertype_idx_rd_cmd *req0;
1454 struct hclge_desc desc;
1455 u32 msg_egress_port;
1456 int pos = 0;
1457 int ret, i;
1458
1459 pos += scnprintf(buf + pos, len - pos,
1460 "entry mac_addr mask ether ");
1461 pos += scnprintf(buf + pos, len - pos,
1462 "mask vlan mask i_map i_dir e_type ");
1463 pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
1464
1465 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1466 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1467 true);
1468 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1469 req0->index = cpu_to_le16(i);
1470
1471 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1472 if (ret) {
1473 dev_err(&hdev->pdev->dev,
1474 "failed to dump manage table, ret = %d\n", ret);
1475 return ret;
1476 }
1477
1478 if (!req0->resp_code)
1479 continue;
1480
1481 pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
1482 le16_to_cpu(req0->index), req0->mac_addr);
1483
1484 pos += scnprintf(buf + pos, len - pos,
1485 "%x %04x %x %04x ",
1486 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1487 le16_to_cpu(req0->ethter_type),
1488 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1489 le16_to_cpu(req0->vlan_tag) &
1490 HCLGE_DBG_MNG_VLAN_TAG);
1491
1492 pos += scnprintf(buf + pos, len - pos,
1493 "%x %02x %02x ",
1494 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1495 req0->i_port_bitmap, req0->i_port_direction);
1496
1497 msg_egress_port = le16_to_cpu(req0->egress_port);
1498 pos += scnprintf(buf + pos, len - pos,
1499 "%x %x %02x %04x %x\n",
1500 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1501 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1502 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1503 le16_to_cpu(req0->egress_queue),
1504 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1505 }
1506
1507 return 0;
1508 }
1509
1510 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1511
1512 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1513 char *tcam_buf,
1514 struct hclge_dbg_tcam_msg tcam_msg)
1515 {
1516 struct hclge_fd_tcam_config_1_cmd *req1;
1517 struct hclge_fd_tcam_config_2_cmd *req2;
1518 struct hclge_fd_tcam_config_3_cmd *req3;
1519 struct hclge_desc desc[3];
1520 int pos = 0;
1521 int ret, i;
1522 __le32 *req;
1523
1524 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1525 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1526 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1527 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1528 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1529
1530 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1531 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1532 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1533
1534 req1->stage = tcam_msg.stage;
1535 req1->xy_sel = sel_x ? 1 : 0;
1536 req1->index = cpu_to_le32(tcam_msg.loc);
1537
1538 ret = hclge_cmd_send(&hdev->hw, desc, 3);
1539 if (ret)
1540 return ret;
1541
1542 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1543 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1544 tcam_msg.loc);
1545
1546 /* tcam_data0 ~ tcam_data1 */
1547 req = (__le32 *)req1->tcam_data;
1548 for (i = 0; i < 2; i++)
1549 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1550 "%08x\n", le32_to_cpu(*req++));
1551
1552 /* tcam_data2 ~ tcam_data7 */
1553 req = (__le32 *)req2->tcam_data;
1554 for (i = 0; i < 6; i++)
1555 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1556 "%08x\n", le32_to_cpu(*req++));
1557
1558 /* tcam_data8 ~ tcam_data12 */
1559 req = (__le32 *)req3->tcam_data;
1560 for (i = 0; i < 5; i++)
1561 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1562 "%08x\n", le32_to_cpu(*req++));
1563
1564 return ret;
1565 }
1566
1567 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1568 {
1569 struct hclge_fd_rule *rule;
1570 struct hlist_node *node;
1571 int cnt = 0;
1572
1573 spin_lock_bh(&hdev->fd_rule_lock);
1574 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1575 rule_locs[cnt] = rule->location;
1576 cnt++;
1577 }
1578 spin_unlock_bh(&hdev->fd_rule_lock);
1579
1580 if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1581 return -EINVAL;
1582
1583 return cnt;
1584 }
1585
1586 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1587 {
1588 u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1589 struct hclge_dbg_tcam_msg tcam_msg;
1590 int i, ret, rule_cnt;
1591 u16 *rule_locs;
1592 char *tcam_buf;
1593 int pos = 0;
1594
1595 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1596 dev_err(&hdev->pdev->dev,
1597 "Only FD-supported dev supports dump fd tcam\n");
1598 return -EOPNOTSUPP;
1599 }
1600
1601 if (!hdev->hclge_fd_rule_num || !rule_num)
1602 return 0;
1603
1604 rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1605 if (!rule_locs)
1606 return -ENOMEM;
1607
1608 tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1609 if (!tcam_buf) {
1610 kfree(rule_locs);
1611 return -ENOMEM;
1612 }
1613
1614 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1615 if (rule_cnt < 0) {
1616 ret = rule_cnt;
1617 dev_err(&hdev->pdev->dev,
1618 "failed to get rule number, ret = %d\n", ret);
1619 goto out;
1620 }
1621
1622 ret = 0;
1623 for (i = 0; i < rule_cnt; i++) {
1624 tcam_msg.stage = HCLGE_FD_STAGE_1;
1625 tcam_msg.loc = rule_locs[i];
1626
1627 ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1628 if (ret) {
1629 dev_err(&hdev->pdev->dev,
1630 "failed to get fd tcam key x, ret = %d\n", ret);
1631 goto out;
1632 }
1633
1634 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1635
1636 ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1637 if (ret) {
1638 dev_err(&hdev->pdev->dev,
1639 "failed to get fd tcam key y, ret = %d\n", ret);
1640 goto out;
1641 }
1642
1643 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1644 }
1645
1646 out:
1647 kfree(tcam_buf);
1648 kfree(rule_locs);
1649 return ret;
1650 }
1651
1652 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1653 {
1654 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1655 struct hclge_fd_ad_cnt_read_cmd *req;
1656 char str_id[HCLGE_DBG_ID_LEN];
1657 struct hclge_desc desc;
1658 int pos = 0;
1659 int ret;
1660 u64 cnt;
1661 u8 i;
1662
1663 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
1664 return -EOPNOTSUPP;
1665
1666 pos += scnprintf(buf + pos, len - pos,
1667 "func_id\thit_times\n");
1668
1669 for (i = 0; i < func_num; i++) {
1670 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1671 req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1672 req->index = cpu_to_le16(i);
1673 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1674 if (ret) {
1675 dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1676 ret);
1677 return ret;
1678 }
1679 cnt = le64_to_cpu(req->cnt);
1680 hclge_dbg_get_func_id_str(str_id, i);
1681 pos += scnprintf(buf + pos, len - pos,
1682 "%s\t%llu\n", str_id, cnt);
1683 }
1684
1685 return 0;
1686 }
1687
1688 static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
1689 {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
1690 {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
1691 {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
1692 {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
1693 {HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
1694 {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
1695 {HCLGE_FUN_RST_ING, "function reset status"}
1696 };
1697
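/* Dump the reset statistics kept in hdev->rst_stats plus the raw values of
 * the reset-related registers listed in hclge_dbg_rst_info[].
 */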
1698 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1699 {
1700 u32 i, offset;
1701 int pos = 0;
1702
1703 pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1704 hdev->rst_stats.pf_rst_cnt);
1705 pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1706 hdev->rst_stats.flr_rst_cnt);
1707 pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1708 hdev->rst_stats.global_rst_cnt);
1709 pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1710 hdev->rst_stats.imp_rst_cnt);
1711 pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1712 hdev->rst_stats.reset_done_cnt);
1713 pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1714 hdev->rst_stats.hw_reset_done_cnt);
1715 pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1716 hdev->rst_stats.reset_cnt);
1717 pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1718 hdev->rst_stats.reset_fail_cnt);
1719
1720 for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
1721 offset = hclge_dbg_rst_info[i].offset;
1722 pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
1723 hclge_dbg_rst_info[i].message,
1724 hclge_read_dev(&hdev->hw, offset));
1725 }
1726
1727 pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1728 hdev->state);
1729
1730 return 0;
1731 }
1732
1733 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1734 {
1735 unsigned long rem_nsec;
1736 int pos = 0;
1737 u64 lc;
1738
1739 lc = local_clock();
1740 rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1741
1742 pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1743 (unsigned long)lc, rem_nsec / 1000);
1744 pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1745 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1746 pos += scnprintf(buf + pos, len - pos,
1747 "last_service_task_processed: %lu(jiffies)\n",
1748 hdev->last_serv_processed);
1749 pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1750 hdev->serv_processed_cnt);
1751
1752 return 0;
1753 }
1754
1755 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1756 {
1757 int pos = 0;
1758
1759 pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1760 hdev->num_nic_msi);
1761 pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1762 hdev->num_roce_msi);
1763 pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1764 hdev->num_msi_used);
1765 pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1766 hdev->num_msi_left);
1767
1768 return 0;
1769 }
1770
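/* Print the IMP statistics payload two 32-bit words per line, advancing the
 * byte-offset label by 8 (2 * sizeof(u32)) for each printed line.
 */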
1771 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1772 char *buf, int len, u32 bd_num)
1773 {
1774 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1775
1776 struct hclge_desc *desc_index = desc_src;
1777 u32 offset = 0;
1778 int pos = 0;
1779 u32 i, j;
1780
1781 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1782
1783 for (i = 0; i < bd_num; i++) {
1784 j = 0;
1785 while (j < HCLGE_DESC_DATA_LEN - 1) {
1786 pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1787 offset);
1788 pos += scnprintf(buf + pos, len - pos, "0x%08x ",
1789 le32_to_cpu(desc_index->data[j++]));
1790 pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1791 le32_to_cpu(desc_index->data[j++]));
1792 offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1793 }
1794 desc_index++;
1795 }
1796 }
1797
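/* Query how many buffer descriptors the IMP statistics occupy, read the
 * statistics into a temporary descriptor array and print them with
 * hclge_dbg_imp_info_data_print().
 */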
1798 static int
1799 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1800 {
1801 struct hclge_get_imp_bd_cmd *req;
1802 struct hclge_desc *desc_src;
1803 struct hclge_desc desc;
1804 u32 bd_num;
1805 int ret;
1806
1807 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1808
1809 req = (struct hclge_get_imp_bd_cmd *)desc.data;
1810 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1811 if (ret) {
1812 dev_err(&hdev->pdev->dev,
1813 "failed to get imp statistics bd number, ret = %d\n",
1814 ret);
1815 return ret;
1816 }
1817
1818 bd_num = le32_to_cpu(req->bd_num);
1819 if (!bd_num) {
1820 dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1821 return -EINVAL;
1822 }
1823
1824 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1825 if (!desc_src)
1826 return -ENOMEM;
1827
1828 ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
1829 HCLGE_OPC_IMP_STATS_INFO);
1830 if (ret) {
1831 kfree(desc_src);
1832 dev_err(&hdev->pdev->dev,
1833 "failed to get imp statistics, ret = %d\n", ret);
1834 return ret;
1835 }
1836
1837 hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1838
1839 kfree(desc_src);
1840
1841 return 0;
1842 }
1843
1844 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
1845 #define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
1846
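/* Print one chunk of NCL configuration data from @desc as
 * "offset | data" lines. The first word of the first descriptor is
 * skipped, @index counts down the bytes that remain to be dumped and
 * @pos tracks the write position in @buf.
 */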
1847 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1848 char *buf, int len, int *pos)
1849 {
1850 #define HCLGE_CMD_DATA_NUM 6
1851
1852 int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1853 int i, j;
1854
1855 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1856 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1857 if (i == 0 && j == 0)
1858 continue;
1859
1860 *pos += scnprintf(buf + *pos, len - *pos,
1861 "0x%04x | 0x%08x\n", offset,
1862 le32_to_cpu(desc[i].data[j]));
1863
1864 offset += sizeof(u32);
1865 *index -= sizeof(u32);
1866
1867 if (*index <= 0)
1868 return;
1869 }
1870 }
1871 }
1872
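/* Read HCLGE_MAX_NCL_CONFIG_LENGTH bytes of NCL configuration from the
 * device in chunks of HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes. The
 * low 16 bits of data0 carry the read offset and the high 16 bits the
 * chunk length.
 */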
1873 static int
1874 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1875 {
1876 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
1877
1878 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1879 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1880 int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1881 int pos = 0;
1882 u32 data0;
1883 int ret;
1884
1885 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1886
1887 while (index > 0) {
1888 data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1889 if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1890 data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1891 else
1892 data0 |= (u32)index << 16;
1893 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1894 HCLGE_OPC_QUERY_NCL_CONFIG);
1895 if (ret)
1896 return ret;
1897
1898 hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
1899 }
1900
1901 return 0;
1902 }
1903
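/* Report the state of each loopback mode: app (MAC) loopback, serdes
 * serial and parallel loopback and, when a PHY is present or PHY access
 * through the IMP is supported, the PHY loopback state.
 */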
1904 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1905 {
1906 struct phy_device *phydev = hdev->hw.mac.phydev;
1907 struct hclge_config_mac_mode_cmd *req_app;
1908 struct hclge_common_lb_cmd *req_common;
1909 struct hclge_desc desc;
1910 u8 loopback_en;
1911 int pos = 0;
1912 int ret;
1913
1914 req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1915 req_common = (struct hclge_common_lb_cmd *)desc.data;
1916
1917 pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1918 hdev->hw.mac.mac_id);
1919
1920 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1921 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1922 if (ret) {
1923 dev_err(&hdev->pdev->dev,
1924 "failed to dump app loopback status, ret = %d\n", ret);
1925 return ret;
1926 }
1927
1928 loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1929 HCLGE_MAC_APP_LP_B);
1930 pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1931 state_str[loopback_en]);
1932
1933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1934 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1935 if (ret) {
1936 dev_err(&hdev->pdev->dev,
1937 "failed to dump common loopback status, ret = %d\n",
1938 ret);
1939 return ret;
1940 }
1941
1942 loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
1943 pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1944 state_str[loopback_en]);
1945
1946 loopback_en = req_common->enable &
1947 HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1948 pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1949 state_str[loopback_en]);
1950
1951 if (phydev) {
1952 loopback_en = phydev->loopback_enabled;
1953 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1954 state_str[loopback_en]);
1955 } else if (hnae3_dev_phy_imp_supported(hdev)) {
1956 loopback_en = req_common->enable &
1957 HCLGE_CMD_GE_PHY_INNER_LOOP_B;
1958 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1959 state_str[loopback_en]);
1960 }
1961
1962 return 0;
1963 }
1964
1965 /* hclge_dbg_dump_mac_tnl_status: print messages about mac tnl interrupts
1966  * @hdev: pointer to struct hclge_dev
1967  */
1968 static int
1969 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1970 {
1971 struct hclge_mac_tnl_stats stats;
1972 unsigned long rem_nsec;
1973 int pos = 0;
1974
1975 pos += scnprintf(buf + pos, len - pos,
1976 "Recently generated mac tnl interruption:\n");
1977
1978 while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1979 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1980
1981 pos += scnprintf(buf + pos, len - pos,
1982 "[%07lu.%03lu] status = 0x%x\n",
1983 (unsigned long)stats.time, rem_nsec / 1000,
1984 stats.status);
1985 }
1986
1987 return 0;
1988 }
1989
1991 static const struct hclge_dbg_item mac_list_items[] = {
1992 { "FUNC_ID", 2 },
1993 { "MAC_ADDR", 12 },
1994 { "STATE", 2 },
1995 };
1996
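/* Walk the unicast or multicast MAC list of every vport and print one
 * "FUNC_ID MAC_ADDR STATE" row per entry while holding the vport's
 * mac_list_lock.
 */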
1997 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
1998 bool is_unicast)
1999 {
2000 char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
2001 char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2002 char *result[ARRAY_SIZE(mac_list_items)];
2003 struct hclge_mac_node *mac_node, *tmp;
2004 struct hclge_vport *vport;
2005 struct list_head *list;
2006 u32 func_id;
2007 int pos = 0;
2008 int i;
2009
2010 for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
2011 result[i] = &data_str[i][0];
2012
2013 pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
2014 is_unicast ? "UC" : "MC");
2015 hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
2016 NULL, ARRAY_SIZE(mac_list_items));
2017 pos += scnprintf(buf + pos, len - pos, "%s", content);
2018
2019 for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
2020 vport = &hdev->vport[func_id];
2021 list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
2022 spin_lock_bh(&vport->mac_list_lock);
2023 list_for_each_entry_safe(mac_node, tmp, list, node) {
2024 i = 0;
2025 result[i++] = hclge_dbg_get_func_id_str(str_id,
2026 func_id);
2027 sprintf(result[i++], "%pM", mac_node->mac_addr);
2028 sprintf(result[i++], "%5s",
2029 hclge_mac_state_str[mac_node->state]);
2030 hclge_dbg_fill_content(content, sizeof(content),
2031 mac_list_items,
2032 (const char **)result,
2033 ARRAY_SIZE(mac_list_items));
2034 pos += scnprintf(buf + pos, len - pos, "%s", content);
2035 }
2036 spin_unlock_bh(&vport->mac_list_lock);
2037 }
2038 }
2039
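/* Dump the UMV space accounting: the global UMV sizes kept in hdev, the
 * number of UMV entries used by the PF and each enabled VF, and the
 * number of multicast MAC entries in use.
 */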
2040 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
2041 {
2042 u8 func_num = pci_num_vf(hdev->pdev) + 1;
2043 struct hclge_vport *vport;
2044 int pos = 0;
2045 u8 i;
2046
2047 pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
2048 hdev->num_alloc_vport);
2049 pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
2050 hdev->max_umv_size);
2051 pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
2052 hdev->wanted_umv_size);
2053 pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
2054 hdev->priv_umv_size);
2055
2056 mutex_lock(&hdev->vport_lock);
2057 pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
2058 hdev->share_umv_size);
2059 for (i = 0; i < func_num; i++) {
2060 vport = &hdev->vport[i];
2061 pos += scnprintf(buf + pos, len - pos,
2062 "vport(%u) used_umv_num : %u\n",
2063 i, vport->used_umv_num);
2064 }
2065 mutex_unlock(&hdev->vport_lock);
2066
2067 pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
2068 hdev->used_mc_mac_num);
2069
2070 return 0;
2071 }
2072
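/* Query the Rx VLAN offload configuration of function @vf_id and fill
 * the strip/drop/priority-only tag bits of @vlan_cfg.
 */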
2073 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2074 struct hclge_dbg_vlan_cfg *vlan_cfg)
2075 {
2076 struct hclge_vport_vtag_rx_cfg_cmd *req;
2077 struct hclge_desc desc;
2078 u16 bmap_index;
2079 u8 rx_cfg;
2080 int ret;
2081
2082 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2083
2084 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
2085 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2086 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2087 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2088
2089 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2090 if (ret) {
2091 dev_err(&hdev->pdev->dev,
2092 "failed to get vport%u rxvlan cfg, ret = %d\n",
2093 vf_id, ret);
2094 return ret;
2095 }
2096
2097 rx_cfg = req->vport_vlan_cfg;
2098 vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2099 vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2100 vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2101 vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2102 vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2103 vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2104
2105 return 0;
2106 }
2107
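/* Query the Tx VLAN offload configuration of function @vf_id, including
 * the default VLAN tag and the accept/insert/shift tag bits of
 * @vlan_cfg.
 */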
2108 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2109 struct hclge_dbg_vlan_cfg *vlan_cfg)
2110 {
2111 struct hclge_vport_vtag_tx_cfg_cmd *req;
2112 struct hclge_desc desc;
2113 u16 bmap_index;
2114 u8 tx_cfg;
2115 int ret;
2116
2117 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2118 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2119 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2120 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2121 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2122
2123 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2124 if (ret) {
2125 dev_err(&hdev->pdev->dev,
2126 "failed to get vport%u txvlan cfg, ret = %d\n",
2127 vf_id, ret);
2128 return ret;
2129 }
2130
2131 tx_cfg = req->vport_vlan_cfg;
2132 vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2133
2134 vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2135 vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2136 vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2137 vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2138 vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2139 vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2140 vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2141
2142 return 0;
2143 }
2144
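/* Issue a VLAN filter control query for the given filter type and
 * function id; the caller parses the result from @desc.
 */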
2145 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2146 u8 vlan_type, u8 vf_id,
2147 struct hclge_desc *desc)
2148 {
2149 struct hclge_vlan_filter_ctrl_cmd *req;
2150 int ret;
2151
2152 hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2153 req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2154 req->vlan_type = vlan_type;
2155 req->vf_id = vf_id;
2156
2157 ret = hclge_cmd_send(&hdev->hw, desc, 1);
2158 if (ret)
2159 dev_err(&hdev->pdev->dev,
2160 "failed to get vport%u vlan filter config, ret = %d.\n",
2161 vf_id, ret);
2162
2163 return ret;
2164 }
2165
2166 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2167 u8 vf_id, u8 *vlan_fe)
2168 {
2169 struct hclge_vlan_filter_ctrl_cmd *req;
2170 struct hclge_desc desc;
2171 int ret;
2172
2173 ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2174 if (ret)
2175 return ret;
2176
2177 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2178 *vlan_fe = req->vlan_fe;
2179
2180 return 0;
2181 }
2182
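/* Read the ingress port VLAN filter bypass state of function @vf_id.
 * Devices without the PORT_VLAN_BYPASS capability return success
 * without touching @bypass_en.
 */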
2183 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2184 u8 vf_id, u8 *bypass_en)
2185 {
2186 struct hclge_port_vlan_filter_bypass_cmd *req;
2187 struct hclge_desc desc;
2188 int ret;
2189
2190 if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2191 return 0;
2192
2193 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2194 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2195 req->vf_id = vf_id;
2196
2197 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2198 if (ret) {
2199 dev_err(&hdev->pdev->dev,
2200 "failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2201 vf_id, ret);
2202 return ret;
2203 }
2204
2205 *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2206
2207 return 0;
2208 }
2209
2210 static const struct hclge_dbg_item vlan_filter_items[] = {
2211 { "FUNC_ID", 2 },
2212 { "I_VF_VLAN_FILTER", 2 },
2213 { "E_VF_VLAN_FILTER", 2 },
2214 { "PORT_VLAN_FILTER_BYPASS", 0 }
2215 };
2216
2217 static const struct hclge_dbg_item vlan_offload_items[] = {
2218 { "FUNC_ID", 2 },
2219 { "PVID", 4 },
2220 { "ACCEPT_TAG1", 2 },
2221 { "ACCEPT_TAG2", 2 },
2222 { "ACCEPT_UNTAG1", 2 },
2223 { "ACCEPT_UNTAG2", 2 },
2224 { "INSERT_TAG1", 2 },
2225 { "INSERT_TAG2", 2 },
2226 { "SHIFT_TAG", 2 },
2227 { "STRIP_TAG1", 2 },
2228 { "STRIP_TAG2", 2 },
2229 { "DROP_TAG1", 2 },
2230 { "DROP_TAG2", 2 },
2231 { "PRI_ONLY_TAG1", 2 },
2232 { "PRI_ONLY_TAG2", 0 }
2233 };
2234
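/* Dump the port level VLAN filter state followed by one row per
 * function with its ingress/egress VF VLAN filter state and, when
 * supported, the port VLAN filter bypass state.
 */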
2235 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2236 int len, int *pos)
2237 {
2238 char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2239 const char *result[ARRAY_SIZE(vlan_filter_items)];
2240 u8 i, j, vlan_fe, bypass, ingress, egress;
2241 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2242 int ret;
2243
2244 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2245 &vlan_fe);
2246 if (ret)
2247 return ret;
2248 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2249 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2250
2251 *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
2252 state_str[ingress]);
2253 *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2254 state_str[egress]);
2255
2256 hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2257 NULL, ARRAY_SIZE(vlan_filter_items));
2258 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2259
2260 for (i = 0; i < func_num; i++) {
2261 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2262 &vlan_fe);
2263 if (ret)
2264 return ret;
2265
2266 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2267 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2268 ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2269 if (ret)
2270 return ret;
2271 j = 0;
2272 result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2273 result[j++] = state_str[ingress];
2274 result[j++] = state_str[egress];
2275 result[j++] =
2276 test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2277 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2278 hclge_dbg_fill_content(content, sizeof(content),
2279 vlan_filter_items, result,
2280 ARRAY_SIZE(vlan_filter_items));
2281 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2282 }
2283 *pos += scnprintf(buf + *pos, len - *pos, "\n");
2284
2285 return 0;
2286 }
2287
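/* Dump the Tx and Rx VLAN offload configuration of the PF and every
 * enabled VF as one table row per function.
 */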
2288 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2289 int len, int *pos)
2290 {
2291 char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2292 const char *result[ARRAY_SIZE(vlan_offload_items)];
2293 char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2294 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2295 struct hclge_dbg_vlan_cfg vlan_cfg;
2296 int ret;
2297 u8 i, j;
2298
2299 hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2300 NULL, ARRAY_SIZE(vlan_offload_items));
2301 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2302
2303 for (i = 0; i < func_num; i++) {
2304 ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2305 if (ret)
2306 return ret;
2307
2308 ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2309 if (ret)
2310 return ret;
2311
2312 sprintf(str_pvid, "%u", vlan_cfg.pvid);
2313 j = 0;
2314 result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2315 result[j++] = str_pvid;
2316 result[j++] = state_str[vlan_cfg.accept_tag1];
2317 result[j++] = state_str[vlan_cfg.accept_tag2];
2318 result[j++] = state_str[vlan_cfg.accept_untag1];
2319 result[j++] = state_str[vlan_cfg.accept_untag2];
2320 result[j++] = state_str[vlan_cfg.insert_tag1];
2321 result[j++] = state_str[vlan_cfg.insert_tag2];
2322 result[j++] = state_str[vlan_cfg.shift_tag];
2323 result[j++] = state_str[vlan_cfg.strip_tag1];
2324 result[j++] = state_str[vlan_cfg.strip_tag2];
2325 result[j++] = state_str[vlan_cfg.drop_tag1];
2326 result[j++] = state_str[vlan_cfg.drop_tag2];
2327 result[j++] = state_str[vlan_cfg.pri_only1];
2328 result[j++] = state_str[vlan_cfg.pri_only2];
2329
2330 hclge_dbg_fill_content(content, sizeof(content),
2331 vlan_offload_items, result,
2332 ARRAY_SIZE(vlan_offload_items));
2333 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2334 }
2335
2336 return 0;
2337 }
2338
2339 static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
2340 int len)
2341 {
2342 int pos = 0;
2343 int ret;
2344
2345 ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
2346 if (ret)
2347 return ret;
2348
2349 return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
2350 }
2351
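/* Dump PTP debug information: the enable flags, the most recent Rx/Tx
 * timestamping activity and counters, and the software versus hardware
 * PTP configuration words.
 */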
2352 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2353 {
2354 struct hclge_ptp *ptp = hdev->ptp;
2355 u32 sw_cfg = ptp->ptp_cfg;
2356 unsigned int tx_start;
2357 unsigned int last_rx;
2358 int pos = 0;
2359 u32 hw_cfg;
2360 int ret;
2361
2362 pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2363 ptp->info.name);
2364 pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2365 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2366 "yes" : "no");
2367 pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2368 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2369 "yes" : "no");
2370 pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2371 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2372 "yes" : "no");
2373
2374 last_rx = jiffies_to_msecs(ptp->last_rx);
2375 pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
2376 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2377 pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2378
2379 tx_start = jiffies_to_msecs(ptp->tx_start);
2380 pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
2381 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2382 pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2383 pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2384 ptp->tx_skipped);
2385 pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2386 ptp->tx_timeout);
2387 pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2388 ptp->last_tx_seqid);
2389
2390 ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2391 if (ret)
2392 return ret;
2393
2394 pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2395 sw_cfg, hw_cfg);
2396
2397 pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2398 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2399
2400 return 0;
2401 }
2402
2403 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2404 {
2405 hclge_dbg_dump_mac_list(hdev, buf, len, true);
2406
2407 return 0;
2408 }
2409
2410 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2411 {
2412 hclge_dbg_dump_mac_list(hdev, buf, len, false);
2413
2414 return 0;
2415 }
2416
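/* Map each debugfs command to its dump handler. An entry provides
 * either a plain dbg_dump callback or a dbg_dump_reg callback for the
 * register dump commands that also need the command id.
 */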
2417 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2418 {
2419 .cmd = HNAE3_DBG_CMD_TM_NODES,
2420 .dbg_dump = hclge_dbg_dump_tm_nodes,
2421 },
2422 {
2423 .cmd = HNAE3_DBG_CMD_TM_PRI,
2424 .dbg_dump = hclge_dbg_dump_tm_pri,
2425 },
2426 {
2427 .cmd = HNAE3_DBG_CMD_TM_QSET,
2428 .dbg_dump = hclge_dbg_dump_tm_qset,
2429 },
2430 {
2431 .cmd = HNAE3_DBG_CMD_TM_MAP,
2432 .dbg_dump = hclge_dbg_dump_tm_map,
2433 },
2434 {
2435 .cmd = HNAE3_DBG_CMD_TM_PG,
2436 .dbg_dump = hclge_dbg_dump_tm_pg,
2437 },
2438 {
2439 .cmd = HNAE3_DBG_CMD_TM_PORT,
2440 .dbg_dump = hclge_dbg_dump_tm_port,
2441 },
2442 {
2443 .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2444 .dbg_dump = hclge_dbg_dump_tc,
2445 },
2446 {
2447 .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2448 .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2449 },
2450 {
2451 .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2452 .dbg_dump = hclge_dbg_dump_qos_pri_map,
2453 },
2454 {
2455 .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
2456 .dbg_dump = hclge_dbg_dump_qos_dscp_map,
2457 },
2458 {
2459 .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2460 .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2461 },
2462 {
2463 .cmd = HNAE3_DBG_CMD_MAC_UC,
2464 .dbg_dump = hclge_dbg_dump_mac_uc,
2465 },
2466 {
2467 .cmd = HNAE3_DBG_CMD_MAC_MC,
2468 .dbg_dump = hclge_dbg_dump_mac_mc,
2469 },
2470 {
2471 .cmd = HNAE3_DBG_CMD_MNG_TBL,
2472 .dbg_dump = hclge_dbg_dump_mng_table,
2473 },
2474 {
2475 .cmd = HNAE3_DBG_CMD_LOOPBACK,
2476 .dbg_dump = hclge_dbg_dump_loopback,
2477 },
2478 {
2479 .cmd = HNAE3_DBG_CMD_PTP_INFO,
2480 .dbg_dump = hclge_dbg_dump_ptp_info,
2481 },
2482 {
2483 .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2484 .dbg_dump = hclge_dbg_dump_interrupt,
2485 },
2486 {
2487 .cmd = HNAE3_DBG_CMD_RESET_INFO,
2488 .dbg_dump = hclge_dbg_dump_rst_info,
2489 },
2490 {
2491 .cmd = HNAE3_DBG_CMD_IMP_INFO,
2492 .dbg_dump = hclge_dbg_get_imp_stats_info,
2493 },
2494 {
2495 .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2496 .dbg_dump = hclge_dbg_dump_ncl_config,
2497 },
2498 {
2499 .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2500 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2501 },
2502 {
2503 .cmd = HNAE3_DBG_CMD_REG_SSU,
2504 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2505 },
2506 {
2507 .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2508 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2509 },
2510 {
2511 .cmd = HNAE3_DBG_CMD_REG_RPU,
2512 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2513 },
2514 {
2515 .cmd = HNAE3_DBG_CMD_REG_NCSI,
2516 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2517 },
2518 {
2519 .cmd = HNAE3_DBG_CMD_REG_RTC,
2520 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2521 },
2522 {
2523 .cmd = HNAE3_DBG_CMD_REG_PPP,
2524 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2525 },
2526 {
2527 .cmd = HNAE3_DBG_CMD_REG_RCB,
2528 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2529 },
2530 {
2531 .cmd = HNAE3_DBG_CMD_REG_TQP,
2532 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2533 },
2534 {
2535 .cmd = HNAE3_DBG_CMD_REG_MAC,
2536 .dbg_dump = hclge_dbg_dump_mac,
2537 },
2538 {
2539 .cmd = HNAE3_DBG_CMD_REG_DCB,
2540 .dbg_dump = hclge_dbg_dump_dcb,
2541 },
2542 {
2543 .cmd = HNAE3_DBG_CMD_FD_TCAM,
2544 .dbg_dump = hclge_dbg_dump_fd_tcam,
2545 },
2546 {
2547 .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2548 .dbg_dump = hclge_dbg_dump_mac_tnl_status,
2549 },
2550 {
2551 .cmd = HNAE3_DBG_CMD_SERV_INFO,
2552 .dbg_dump = hclge_dbg_dump_serv_info,
2553 },
2554 {
2555 .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2556 .dbg_dump = hclge_dbg_dump_vlan_config,
2557 },
2558 {
2559 .cmd = HNAE3_DBG_CMD_FD_COUNTER,
2560 .dbg_dump = hclge_dbg_dump_fd_counter,
2561 },
2562 {
2563 .cmd = HNAE3_DBG_CMD_UMV_INFO,
2564 .dbg_dump = hclge_dbg_dump_umv_info,
2565 },
2566 };
2567
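/* Dispatch a debugfs dump request: look up the handler registered for
 * @cmd in hclge_dbg_cmd_func and run it against the PF behind @handle.
 */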
2568 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2569 char *buf, int len)
2570 {
2571 struct hclge_vport *vport = hclge_get_vport(handle);
2572 const struct hclge_dbg_func *cmd_func;
2573 struct hclge_dev *hdev = vport->back;
2574 u32 i;
2575
2576 for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2577 if (cmd == hclge_dbg_cmd_func[i].cmd) {
2578 cmd_func = &hclge_dbg_cmd_func[i];
2579 if (cmd_func->dbg_dump)
2580 return cmd_func->dbg_dump(hdev, buf, len);
2581 else
2582 return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2583 len);
2584 }
2585 }
2586
2587 dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2588 return -EINVAL;
2589 }
2590