1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23 
24 #define DEBUGFS_DIR_NAME "octeontx2"
25 
/* Generic stat indices used to index the cgx_rx_stats_fields[] and
 * cgx_tx_stats_fields[] description tables below; CGX_STATn maps to
 * CGX hardware stat register n for the given direction.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47 
/* NIX TX stats: per-LF TX statistic register offsets */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,	/* number of TX stats, keep last */
};
57 
/* NIX RX stats: per-LF RX statistic register offsets */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,	/* number of RX stats, keep last */
};
74 
/* Human-readable descriptions of the CGX RX stat registers, indexed by
 * the CGX_STATn enum; printed by the per-LMAC debugfs stats files.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
90 
/* Human-readable descriptions of the CGX TX stat registers, indexed by
 * the CGX_STATn enum; printed by the per-LMAC debugfs stats files.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
111 
112 static char *rpm_rx_stats_fields[] = {
113 	"Octets of received packets",
114 	"Octets of received packets with out error",
115 	"Received packets with alignment errors",
116 	"Control/PAUSE packets received",
117 	"Packets received with Frame too long Errors",
118 	"Packets received with a1nrange length Errors",
119 	"Received packets",
120 	"Packets received with FrameCheckSequenceErrors",
121 	"Packets received with VLAN header",
122 	"Error packets",
123 	"Packets received with unicast DMAC",
124 	"Packets received with multicast DMAC",
125 	"Packets received with broadcast DMAC",
126 	"Dropped packets",
127 	"Total frames received on interface",
128 	"Packets received with an octet count < 64",
129 	"Packets received with an octet count == 64",
130 	"Packets received with an octet count of 65-127",
131 	"Packets received with an octet count of 128-255",
132 	"Packets received with an octet count of 256-511",
133 	"Packets received with an octet count of 512-1023",
134 	"Packets received with an octet count of 1024-1518",
135 	"Packets received with an octet count of > 1518",
136 	"Oversized Packets",
137 	"Jabber Packets",
138 	"Fragmented Packets",
139 	"CBFC(class based flow control) pause frames received for class 0",
140 	"CBFC pause frames received for class 1",
141 	"CBFC pause frames received for class 2",
142 	"CBFC pause frames received for class 3",
143 	"CBFC pause frames received for class 4",
144 	"CBFC pause frames received for class 5",
145 	"CBFC pause frames received for class 6",
146 	"CBFC pause frames received for class 7",
147 	"CBFC pause frames received for class 8",
148 	"CBFC pause frames received for class 9",
149 	"CBFC pause frames received for class 10",
150 	"CBFC pause frames received for class 11",
151 	"CBFC pause frames received for class 12",
152 	"CBFC pause frames received for class 13",
153 	"CBFC pause frames received for class 14",
154 	"CBFC pause frames received for class 15",
155 	"MAC control packets received",
156 };
157 
/* Human-readable descriptions of the RPM (CN10K MAC) TX stat registers,
 * in hardware stat-id order; printed by the per-LMAC debugfs stats files.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
194 
/* CPT engine groups; values match the hardware engine-type encoding.
 * AE/SE/IE are the asymmetric, symmetric and IPsec engine types
 * (per Marvell CPT naming) — confirm against the CPT HRM.
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
200 
/* Number of NDC banks: low byte of the block's NDC_AF_CONST register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Placeholders so the FOPS macros below can be passed 'NULL' for a
 * missing handler: rvu_dbg_##NULL / rvu_dbg_open_##NULL expand to these.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare a seq_file-backed debugfs file: generates an open handler
 * wrapping single_open() around rvu_dbg_<read_op> and the matching
 * 'rvu_dbg_<name>_fops' file_operations. write_op may be NULL.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare a raw (non seq_file) debugfs file using simple_open() with
 * direct read/write handlers.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
230 
231 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
232 {
233 	struct mcs *mcs = filp->private;
234 	struct mcs_port_stats stats;
235 	int lmac;
236 
237 	seq_puts(filp, "\n port stats\n");
238 	mutex_lock(&mcs->stats_lock);
239 	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
240 		mcs_get_port_stats(mcs, &stats, lmac, dir);
241 		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
242 		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
243 
244 		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
245 			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
246 				   stats.preempt_err_cnt);
247 		if (dir == MCS_TX)
248 			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
249 				   stats.sectag_insert_err_cnt);
250 	}
251 	mutex_unlock(&mcs->stats_lock);
252 	return 0;
253 }
254 
/* debugfs 'rx_stats/port' show handler: RX-direction port counters */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
261 
/* debugfs 'tx_stats/port' show handler: TX-direction port counters */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
268 
/* Dump per-SA (secure association) counters for the given direction.
 * Only SAs currently allocated in the direction's resource bitmap are
 * printed. TX SAs report encrypt/protect counts; RX SAs report the
 * validation-outcome counters.
 *
 * NOTE(review): the "TX/RX SA stats" banner is emitted inside the loop,
 * i.e. once per SA rather than once per dump — confirm this is intended.
 */
static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_sa_stats stats;
	struct rsrc_bmap *map;
	int sa_id;

	if (dir == MCS_TX) {
		map = &mcs->tx.sa;
		mutex_lock(&mcs->stats_lock);
		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
			seq_puts(filp, "\n TX SA stats\n");
			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
				   stats.pkt_encrypt_cnt);

			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
				   stats.pkt_protected_cnt);
		}
		mutex_unlock(&mcs->stats_lock);
		return 0;
	}

	/* RX stats */
	map = &mcs->rx.sa;
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
		seq_puts(filp, "\n RX SA stats\n");
		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
307 
/* debugfs 'rx_stats/sa' show handler: RX-direction SA counters */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
314 
/* debugfs 'tx_stats/sa' show handler: TX-direction SA counters */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
321 
322 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
323 {
324 	struct mcs *mcs = filp->private;
325 	struct mcs_sc_stats stats;
326 	struct rsrc_bmap *map;
327 	int sc_id;
328 
329 	map = &mcs->tx.sc;
330 	seq_puts(filp, "\n SC stats\n");
331 
332 	mutex_lock(&mcs->stats_lock);
333 	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
334 		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
335 		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
336 		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
337 		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
338 
339 		if (mcs->hw->mcs_blks == 1) {
340 			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
341 				   stats.octet_encrypt_cnt);
342 			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
343 				   stats.octet_protected_cnt);
344 		}
345 	}
346 	mutex_unlock(&mcs->stats_lock);
347 	return 0;
348 }
349 
350 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
351 
/* Dump RX SC (secure channel) counters for every allocated RX SC.
 * Delay/ok packet counts exist only on multi-MCS-block silicon;
 * per-octet counters only on single-block silicon.
 */
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
388 
389 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
390 {
391 	struct mcs *mcs = filp->private;
392 	struct mcs_flowid_stats stats;
393 	struct rsrc_bmap *map;
394 	int flow_id;
395 
396 	seq_puts(filp, "\n Flowid stats\n");
397 
398 	if (dir == MCS_RX)
399 		map = &mcs->rx.flow_ids;
400 	else
401 		map = &mcs->tx.flow_ids;
402 
403 	mutex_lock(&mcs->stats_lock);
404 	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
405 		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
406 		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
407 	}
408 	mutex_unlock(&mcs->stats_lock);
409 	return 0;
410 }
411 
/* debugfs 'tx_stats/flowid' show handler: TX-direction TCAM hits */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
418 
/* debugfs 'rx_stats/flowid' show handler: RX-direction TCAM hits */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
425 
426 static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
427 {
428 	struct mcs *mcs = filp->private;
429 	struct mcs_secy_stats stats;
430 	struct rsrc_bmap *map;
431 	int secy_id;
432 
433 	map = &mcs->tx.secy;
434 	seq_puts(filp, "\n MCS TX secy stats\n");
435 
436 	mutex_lock(&mcs->stats_lock);
437 	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
438 		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
439 		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
440 		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
441 			   stats.ctl_pkt_bcast_cnt);
442 		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
443 			   stats.ctl_pkt_mcast_cnt);
444 		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
445 			   stats.ctl_pkt_ucast_cnt);
446 		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
447 		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
448 			   stats.unctl_pkt_bcast_cnt);
449 		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
450 			   stats.unctl_pkt_mcast_cnt);
451 		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
452 			   stats.unctl_pkt_ucast_cnt);
453 		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
454 		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
455 			   stats.octet_encrypted_cnt);
456 		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
457 			   stats.octet_protected_cnt);
458 		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
459 			   stats.pkt_noactivesa_cnt);
460 		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
461 		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
462 	}
463 	mutex_unlock(&mcs->stats_lock);
464 	return 0;
465 }
466 
467 RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
468 
469 static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
470 {
471 	struct mcs *mcs = filp->private;
472 	struct mcs_secy_stats stats;
473 	struct rsrc_bmap *map;
474 	int secy_id;
475 
476 	map = &mcs->rx.secy;
477 	seq_puts(filp, "\n MCS secy stats\n");
478 
479 	mutex_lock(&mcs->stats_lock);
480 	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
481 		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
482 		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
483 		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
484 			   stats.ctl_pkt_bcast_cnt);
485 		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
486 			   stats.ctl_pkt_mcast_cnt);
487 		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
488 			   stats.ctl_pkt_ucast_cnt);
489 		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
490 		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
491 			   stats.unctl_pkt_bcast_cnt);
492 		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
493 			   stats.unctl_pkt_mcast_cnt);
494 		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
495 			   stats.unctl_pkt_ucast_cnt);
496 		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
497 		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
498 			   stats.octet_decrypted_cnt);
499 		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
500 			   stats.octet_validated_cnt);
501 		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
502 			   stats.pkt_port_disabled_cnt);
503 		seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
504 		seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
505 		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
506 			   stats.pkt_nosaerror_cnt);
507 		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
508 			   stats.pkt_tagged_ctl_cnt);
509 		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
510 		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
511 		if (mcs->hw->mcs_blks > 1)
512 			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
513 				   stats.pkt_notag_cnt);
514 	}
515 	mutex_unlock(&mcs->stats_lock);
516 	return 0;
517 }
518 
519 RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
520 
521 static void rvu_dbg_mcs_init(struct rvu *rvu)
522 {
523 	struct mcs *mcs;
524 	char dname[10];
525 	int i;
526 
527 	if (!rvu->mcs_blk_cnt)
528 		return;
529 
530 	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
531 
532 	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
533 		mcs = mcs_get_pdata(i);
534 
535 		sprintf(dname, "mcs%d", i);
536 		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
537 						      rvu->rvu_dbg.mcs_root);
538 
539 		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
540 
541 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
542 				    &rvu_dbg_mcs_rx_flowid_stats_fops);
543 
544 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
545 				    &rvu_dbg_mcs_rx_secy_stats_fops);
546 
547 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
548 				    &rvu_dbg_mcs_rx_sc_stats_fops);
549 
550 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
551 				    &rvu_dbg_mcs_rx_sa_stats_fops);
552 
553 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
554 				    &rvu_dbg_mcs_rx_port_stats_fops);
555 
556 		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
557 
558 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
559 				    &rvu_dbg_mcs_tx_flowid_stats_fops);
560 
561 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
562 				    &rvu_dbg_mcs_tx_secy_stats_fops);
563 
564 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
565 				    &rvu_dbg_mcs_tx_sc_stats_fops);
566 
567 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
568 				    &rvu_dbg_mcs_tx_sa_stats_fops);
569 
570 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
571 				    &rvu_dbg_mcs_tx_port_stats_fops);
572 	}
573 }
574 
575 #define LMT_MAPTBL_ENTRY_SIZE 16
576 /* Dump LMTST map table */
577 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
578 					       char __user *buffer,
579 					       size_t count, loff_t *ppos)
580 {
581 	struct rvu *rvu = filp->private_data;
582 	u64 lmt_addr, val, tbl_base;
583 	int pf, vf, num_vfs, hw_vfs;
584 	void __iomem *lmt_map_base;
585 	int buf_size = 10240;
586 	size_t off = 0;
587 	int index = 0;
588 	char *buf;
589 	int ret;
590 
591 	/* don't allow partial reads */
592 	if (*ppos != 0)
593 		return 0;
594 
595 	buf = kzalloc(buf_size, GFP_KERNEL);
596 	if (!buf)
597 		return -ENOMEM;
598 
599 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
600 
601 	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
602 	if (!lmt_map_base) {
603 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
604 		kfree(buf);
605 		return false;
606 	}
607 
608 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
609 			  "\n\t\t\t\t\tLmtst Map Table Entries");
610 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
611 			  "\n\t\t\t\t\t=======================");
612 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
613 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
614 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
615 			  "Lmtline Base (word 0)\t\t");
616 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
617 			  "Lmt Map Entry (word 1)");
618 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
619 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
620 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
621 				    pf);
622 
623 		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
624 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
625 				 (tbl_base + index));
626 		lmt_addr = readq(lmt_map_base + index);
627 		off += scnprintf(&buf[off], buf_size - 1 - off,
628 				 " 0x%016llx\t\t", lmt_addr);
629 		index += 8;
630 		val = readq(lmt_map_base + index);
631 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
632 				 val);
633 		/* Reading num of VFs per PF */
634 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
635 		for (vf = 0; vf < num_vfs; vf++) {
636 			index = (pf * rvu->hw->total_vfs * 16) +
637 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
638 			off += scnprintf(&buf[off], buf_size - 1 - off,
639 					    "PF%d:VF%d  \t\t", pf, vf);
640 			off += scnprintf(&buf[off], buf_size - 1 - off,
641 					 " 0x%llx\t\t", (tbl_base + index));
642 			lmt_addr = readq(lmt_map_base + index);
643 			off += scnprintf(&buf[off], buf_size - 1 - off,
644 					 " 0x%016llx\t\t", lmt_addr);
645 			index += 8;
646 			val = readq(lmt_map_base + index);
647 			off += scnprintf(&buf[off], buf_size - 1 - off,
648 					 " 0x%016llx\n", val);
649 		}
650 	}
651 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
652 
653 	ret = min(off, count);
654 	if (copy_to_user(buffer, buf, ret))
655 		ret = -EFAULT;
656 	kfree(buf);
657 
658 	iounmap(lmt_map_base);
659 	if (ret < 0)
660 		return ret;
661 
662 	*ppos = ret;
663 	return ret;
664 }
665 
666 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
667 
/* Build a compact, comma-separated list of the block's LFs attached to
 * 'pcifunc' into 'lfs', collapsing consecutive LFs into "a-b" ranges
 * (e.g. "0-3,7,9-10"). 'lfs' must be large enough for the result; the
 * caller sizes it via get_max_column_width().
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts out-of-range so the first match never looks
	 * like a continuation of a run.
	 */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* Continue the current run; emit nothing yet */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* Close the previous run and start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* Close a run that extended to the last matching LF */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
701 
/* Compute the widest LF-list string any (pcifunc, block) pair produces,
 * so the rsrc_attach_status dump can align its columns. Returns at
 * least 12, or -ENOMEM on allocation failure.
 *
 * NOTE(review): 'buf' is 256 bytes while get_lf_str_list() does not
 * bound its output — assumes no LF list exceeds 256 chars; confirm.
 */
static int get_max_column_width(struct rvu *rvu)
{
	int index, pf, vf, lf_str_size = 12, buf_size = 256;
	struct rvu_block block;
	u16 pcifunc;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* vf == 0 means the PF itself; hence <= total_vfs */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;

				get_lf_str_list(block, pcifunc, buf);
				if (lf_str_size <= strlen(buf))
					lf_str_size = strlen(buf) + 1;
			}
		}
	}

	kfree(buf);
	return lf_str_size;
}
734 
735 /* Dumps current provisioning status of all RVU block LFs */
736 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
737 					  char __user *buffer,
738 					  size_t count, loff_t *ppos)
739 {
740 	int index, off = 0, flag = 0, len = 0, i = 0;
741 	struct rvu *rvu = filp->private_data;
742 	int bytes_not_copied = 0;
743 	struct rvu_block block;
744 	int pf, vf, pcifunc;
745 	int buf_size = 2048;
746 	int lf_str_size;
747 	char *lfs;
748 	char *buf;
749 
750 	/* don't allow partial reads */
751 	if (*ppos != 0)
752 		return 0;
753 
754 	buf = kzalloc(buf_size, GFP_KERNEL);
755 	if (!buf)
756 		return -ENOMEM;
757 
758 	/* Get the maximum width of a column */
759 	lf_str_size = get_max_column_width(rvu);
760 
761 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
762 	if (!lfs) {
763 		kfree(buf);
764 		return -ENOMEM;
765 	}
766 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
767 			  "pcifunc");
768 	for (index = 0; index < BLK_COUNT; index++)
769 		if (strlen(rvu->hw->block[index].name)) {
770 			off += scnprintf(&buf[off], buf_size - 1 - off,
771 					 "%-*s", lf_str_size,
772 					 rvu->hw->block[index].name);
773 		}
774 
775 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
776 	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
777 	if (bytes_not_copied)
778 		goto out;
779 
780 	i++;
781 	*ppos += off;
782 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
783 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
784 			off = 0;
785 			flag = 0;
786 			pcifunc = pf << 10 | vf;
787 			if (!pcifunc)
788 				continue;
789 
790 			if (vf) {
791 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
792 				off = scnprintf(&buf[off],
793 						buf_size - 1 - off,
794 						"%-*s", lf_str_size, lfs);
795 			} else {
796 				sprintf(lfs, "PF%d", pf);
797 				off = scnprintf(&buf[off],
798 						buf_size - 1 - off,
799 						"%-*s", lf_str_size, lfs);
800 			}
801 
802 			for (index = 0; index < BLK_COUNT; index++) {
803 				block = rvu->hw->block[index];
804 				if (!strlen(block.name))
805 					continue;
806 				len = 0;
807 				lfs[len] = '\0';
808 				get_lf_str_list(block, pcifunc, lfs);
809 				if (strlen(lfs))
810 					flag = 1;
811 
812 				off += scnprintf(&buf[off], buf_size - 1 - off,
813 						 "%-*s", lf_str_size, lfs);
814 			}
815 			if (flag) {
816 				off +=	scnprintf(&buf[off],
817 						  buf_size - 1 - off, "\n");
818 				bytes_not_copied = copy_to_user(buffer +
819 								(i * off),
820 								buf, off);
821 				if (bytes_not_copied)
822 					goto out;
823 
824 				i++;
825 				*ppos += off;
826 			}
827 		}
828 	}
829 
830 out:
831 	kfree(lfs);
832 	kfree(buf);
833 	if (bytes_not_copied)
834 		return -EFAULT;
835 
836 	return *ppos;
837 }
838 
839 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
840 
841 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
842 {
843 	struct rvu *rvu = filp->private;
844 	struct pci_dev *pdev = NULL;
845 	struct mac_ops *mac_ops;
846 	char cgx[10], lmac[10];
847 	struct rvu_pfvf *pfvf;
848 	int pf, domain, blkid;
849 	u8 cgx_id, lmac_id;
850 	u16 pcifunc;
851 
852 	domain = 2;
853 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
854 	/* There can be no CGX devices at all */
855 	if (!mac_ops)
856 		return 0;
857 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
858 		   mac_ops->name);
859 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
860 		if (!is_pf_cgxmapped(rvu, pf))
861 			continue;
862 
863 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
864 		if (!pdev)
865 			continue;
866 
867 		cgx[0] = 0;
868 		lmac[0] = 0;
869 		pcifunc = pf << 10;
870 		pfvf = rvu_get_pfvf(rvu, pcifunc);
871 
872 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
873 			blkid = 0;
874 		else
875 			blkid = 1;
876 
877 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
878 				    &lmac_id);
879 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
880 		sprintf(lmac, "LMAC%d", lmac_id);
881 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
882 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
883 	}
884 	return 0;
885 }
886 
887 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
888 
889 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
890 				u16 *pcifunc)
891 {
892 	struct rvu_block *block;
893 	struct rvu_hwinfo *hw;
894 
895 	hw = rvu->hw;
896 	block = &hw->block[blkaddr];
897 
898 	if (lf < 0 || lf >= block->lf.max) {
899 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
900 			 block->lf.max - 1);
901 		return false;
902 	}
903 
904 	*pcifunc = block->fn_map[lf];
905 	if (!*pcifunc) {
906 		dev_warn(rvu->dev,
907 			 "This LF is not attached to any RVU PFFUNC\n");
908 		return false;
909 	}
910 	return true;
911 }
912 
913 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
914 {
915 	char *buf;
916 
917 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
918 	if (!buf)
919 		return;
920 
921 	if (!pfvf->aura_ctx) {
922 		seq_puts(m, "Aura context is not initialized\n");
923 	} else {
924 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
925 					pfvf->aura_ctx->qsize);
926 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
927 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
928 	}
929 
930 	if (!pfvf->pool_ctx) {
931 		seq_puts(m, "Pool context is not initialized\n");
932 	} else {
933 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
934 					pfvf->pool_ctx->qsize);
935 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
936 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
937 	}
938 	kfree(buf);
939 }
940 
/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 * 'blktype' selects NPA vs NIX; for NIX, the owning block (NIX0/NIX1)
 * is derived from the debugfs parent directory name. The LF to dump is
 * the one previously written via the qsize write handler
 * (rvu_dbg.{npa,nix}_qsize_id).
 */
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
				 int blktype)
{
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = filp->private;
	switch (blktype) {
	case BLKTYPE_NPA:
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		/* NIX0 vs NIX1 is encoded in the parent directory name */
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
				   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);

	return 0;
}
988 
989 static ssize_t rvu_dbg_qsize_write(struct file *filp,
990 				   const char __user *buffer, size_t count,
991 				   loff_t *ppos, int blktype)
992 {
993 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
994 	struct seq_file *seqfile = filp->private_data;
995 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
996 	struct rvu *rvu = seqfile->private;
997 	struct dentry *current_dir;
998 	int blkaddr;
999 	u16 pcifunc;
1000 	int ret, lf;
1001 
1002 	cmd_buf = memdup_user(buffer, count + 1);
1003 	if (IS_ERR(cmd_buf))
1004 		return -ENOMEM;
1005 
1006 	cmd_buf[count] = '\0';
1007 
1008 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1009 	if (cmd_buf_tmp) {
1010 		*cmd_buf_tmp = '\0';
1011 		count = cmd_buf_tmp - cmd_buf + 1;
1012 	}
1013 
1014 	cmd_buf_tmp = cmd_buf;
1015 	subtoken = strsep(&cmd_buf, " ");
1016 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1017 	if (cmd_buf)
1018 		ret = -EINVAL;
1019 
1020 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1021 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1022 		goto qsize_write_done;
1023 	}
1024 
1025 	if (blktype == BLKTYPE_NPA) {
1026 		blkaddr = BLKADDR_NPA;
1027 	} else {
1028 		current_dir = filp->f_path.dentry->d_parent;
1029 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
1030 				   BLKADDR_NIX1 : BLKADDR_NIX0);
1031 	}
1032 
1033 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1034 		ret = -EINVAL;
1035 		goto qsize_write_done;
1036 	}
1037 	if (blktype  == BLKTYPE_NPA)
1038 		rvu->rvu_dbg.npa_qsize_id = lf;
1039 	else
1040 		rvu->rvu_dbg.nix_qsize_id = lf;
1041 
1042 qsize_write_done:
1043 	kfree(cmd_buf_tmp);
1044 	return ret ? ret : count;
1045 }
1046 
1047 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1048 				       const char __user *buffer,
1049 				       size_t count, loff_t *ppos)
1050 {
1051 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1052 					    BLKTYPE_NPA);
1053 }
1054 
1055 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1056 {
1057 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1058 }
1059 
1060 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1061 
/* Dumps the given NPA Aura's context (words W0-W6 of the AQ read response)
 * to the seq_file, one "Wn: field" line per context field.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is only present on CN10K-class silicon, not OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is also CN10K-only */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1109 
/* Dumps the given NPA Pool's context (words W0-W8 of the AQ read response)
 * to the seq_file, one "Wn: field" line per context field.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is only present on CN10K-class silicon, not OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is also CN10K-only */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1156 
/* Reads aura/pool context(s) from the NPA admin queue and dumps them.
 * The LF, context id and "all" flag were cached earlier by the matching
 * aura_ctx/pool_ctx debugfs write handler (see write_npa_ctx()).
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Pick up the selection cached by the last write to this file */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an AQ READ instruction for the owning PF_FUNC */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every context [0, qsize); otherwise just the one id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
1237 
/* Validate and cache a <npalf, id, all> selection written to an aura_ctx or
 * pool_ctx debugfs file, for the matching display handler to use later.
 * Returns 0 on success, -EINVAL on any invalid selection.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* The context id must be within the LF's configured qsize */
	if (ctype == NPA_AQ_CTYPE_AURA) {
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
	} else if (ctype == NPA_AQ_CTYPE_POOL) {
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* Remember the selection for the next read of the seq file */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1288 
/* Parse "<npalf> [<id>|all]" from a user write into @npalf, @id and @all.
 * @cmd_buf must be at least *count + 1 bytes: *count bytes are copied from
 * @buffer and a NUL terminator is stored at cmd_buf[*count].
 * On success *count is trimmed to the bytes actually consumed (up to and
 * including the first newline). Returns 0 on success, -EFAULT on a failed
 * user copy, or -EINVAL on malformed input / trailing tokens.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	/* Trim the input at the first newline */
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Optional second token: a context id, or "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* Anything left over means extra tokens were supplied */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
1326 
1327 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1328 				     const char __user *buffer,
1329 				     size_t count, loff_t *ppos, int ctype)
1330 {
1331 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1332 					"aura" : "pool";
1333 	struct seq_file *seqfp = filp->private_data;
1334 	struct rvu *rvu = seqfp->private;
1335 	int npalf, id = 0, ret;
1336 	bool all = false;
1337 
1338 	if ((*ppos != 0) || !count)
1339 		return -EINVAL;
1340 
1341 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1342 	if (!cmd_buf)
1343 		return count;
1344 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1345 				   &npalf, &id, &all);
1346 	if (ret < 0) {
1347 		dev_info(rvu->dev,
1348 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1349 			 ctype_string, ctype_string);
1350 		goto done;
1351 	} else {
1352 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1353 	}
1354 done:
1355 	kfree(cmd_buf);
1356 	return ret ? ret : count;
1357 }
1358 
1359 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1360 					  const char __user *buffer,
1361 					  size_t count, loff_t *ppos)
1362 {
1363 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1364 				     NPA_AQ_CTYPE_AURA);
1365 }
1366 
1367 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1368 {
1369 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1370 }
1371 
1372 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1373 
1374 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1375 					  const char __user *buffer,
1376 					  size_t count, loff_t *ppos)
1377 {
1378 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1379 				     NPA_AQ_CTYPE_POOL);
1380 }
1381 
1382 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1383 {
1384 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1385 }
1386 
1387 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1388 
1389 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1390 			    int ctype, int transaction)
1391 {
1392 	u64 req, out_req, lat, cant_alloc;
1393 	struct nix_hw *nix_hw;
1394 	struct rvu *rvu;
1395 	int port;
1396 
1397 	if (blk_addr == BLKADDR_NDC_NPA0) {
1398 		rvu = s->private;
1399 	} else {
1400 		nix_hw = s->private;
1401 		rvu = nix_hw->rvu;
1402 	}
1403 
1404 	for (port = 0; port < NDC_MAX_PORT; port++) {
1405 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1406 						(port, ctype, transaction));
1407 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1408 						(port, ctype, transaction));
1409 		out_req = rvu_read64(rvu, blk_addr,
1410 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1411 				     (port, ctype, transaction));
1412 		cant_alloc = rvu_read64(rvu, blk_addr,
1413 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1414 					(port, transaction));
1415 		seq_printf(s, "\nPort:%d\n", port);
1416 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1417 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1418 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1419 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1420 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1421 	}
1422 }
1423 
1424 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1425 {
1426 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1427 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1428 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1429 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1430 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1431 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1432 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1433 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1434 	return 0;
1435 }
1436 
1437 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1438 {
1439 	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1440 }
1441 
1442 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1443 
1444 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1445 {
1446 	struct nix_hw *nix_hw;
1447 	struct rvu *rvu;
1448 	int bank, max_bank;
1449 
1450 	if (blk_addr == BLKADDR_NDC_NPA0) {
1451 		rvu = s->private;
1452 	} else {
1453 		nix_hw = s->private;
1454 		rvu = nix_hw->rvu;
1455 	}
1456 
1457 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
1458 	for (bank = 0; bank < max_bank; bank++) {
1459 		seq_printf(s, "BANK:%d\n", bank);
1460 		seq_printf(s, "\tHits:\t%lld\n",
1461 			   (u64)rvu_read64(rvu, blk_addr,
1462 			   NDC_AF_BANKX_HIT_PC(bank)));
1463 		seq_printf(s, "\tMiss:\t%lld\n",
1464 			   (u64)rvu_read64(rvu, blk_addr,
1465 			    NDC_AF_BANKX_MISS_PC(bank)));
1466 	}
1467 	return 0;
1468 }
1469 
1470 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1471 {
1472 	struct nix_hw *nix_hw = filp->private;
1473 	int blkaddr = 0;
1474 	int ndc_idx = 0;
1475 
1476 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1477 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1478 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1479 
1480 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1481 }
1482 
1483 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1484 
1485 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1486 {
1487 	struct nix_hw *nix_hw = filp->private;
1488 	int blkaddr = 0;
1489 	int ndc_idx = 0;
1490 
1491 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1492 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1493 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1494 
1495 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1496 }
1497 
1498 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1499 
1500 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1501 					     void *unused)
1502 {
1503 	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1504 }
1505 
1506 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1507 
1508 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1509 						void *unused)
1510 {
1511 	struct nix_hw *nix_hw = filp->private;
1512 	int ndc_idx = NPA0_U;
1513 	int blkaddr = 0;
1514 
1515 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1516 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1517 
1518 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1519 }
1520 
1521 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1522 
1523 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1524 						void *unused)
1525 {
1526 	struct nix_hw *nix_hw = filp->private;
1527 	int ndc_idx = NPA0_U;
1528 	int blkaddr = 0;
1529 
1530 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1531 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1532 
1533 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1534 }
1535 
1536 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1537 
1538 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1539 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1540 {
1541 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1542 		   sq_ctx->ena, sq_ctx->qint_idx);
1543 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1544 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1545 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1546 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1547 
1548 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1549 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1550 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1551 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1552 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1553 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1554 
1555 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1556 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1557 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1558 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1559 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1560 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1561 
1562 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1563 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1564 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1565 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1566 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1567 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1568 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1569 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1570 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1571 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1572 
1573 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1574 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1575 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1576 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1577 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1578 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1579 		   sq_ctx->smenq_next_sqb);
1580 
1581 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1582 
1583 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1584 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1585 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1586 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1587 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1588 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1589 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1590 
1591 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1592 		   (u64)sq_ctx->scm_lso_rem);
1593 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1594 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1595 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1596 		   (u64)sq_ctx->dropped_octs);
1597 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1598 		   (u64)sq_ctx->dropped_pkts);
1599 }
1600 
/* Dumps the given NIX SQ's context to the seq_file. On CN10K-class silicon
 * the AQ response actually carries a nix_cn10k_sq_ctx_s layout, so dumping
 * is delegated to print_nix_cn10k_sq_ctx(); the OcteonTx2 layout is
 * printed inline below.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4-W8 are SQB pointers (physical addresses) */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10-W15 are statistics counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1670 
1671 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1672 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1673 {
1674 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1675 		   rq_ctx->ena, rq_ctx->sso_ena);
1676 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1677 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1678 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1679 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1680 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1681 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1682 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1683 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1684 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1685 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1686 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1687 
1688 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1689 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1690 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1691 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1692 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1693 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1694 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1695 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1696 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1697 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1698 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1699 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1700 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1701 
1702 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1703 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1704 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1705 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1706 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1707 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1708 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1709 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1710 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1711 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1712 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1713 
1714 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1715 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1716 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1717 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1718 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1719 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1720 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1721 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1722 
1723 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1724 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1725 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1726 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1727 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1728 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1729 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1730 
1731 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1732 		   rq_ctx->ltag, rq_ctx->good_utag);
1733 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1734 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1735 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1736 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1737 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1738 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1739 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1740 
1741 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1742 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1743 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1744 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1745 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1746 }
1747 
/* Dump a NIX receive-queue (RQ) context word by word into the seq_file.
 * On CN10K-class silicon the RQ context layout differs, so the dump is
 * delegated to print_nix_cn10k_rq_ctx(); otherwise the OTx2 layout below
 * is printed. "Wn:" prefixes identify the 64-bit context word each field
 * belongs to.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K parts use struct nix_cn10k_rq_ctx_s; same response buffer,
	 * different field layout.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10 are 48-bit statistics counters; widen to u64 for %llu. */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1816 
/* Dump a NIX completion-queue (CQ) context word by word into the seq_file.
 * "Wn:" prefixes identify the 64-bit context word each field belongs to.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1847 
/* Common "read" handler for the sq_ctx/rq_ctx/cq_ctx debugfs files.
 *
 * The LF, queue id and the "all" flag were stored earlier by the matching
 * write handler (write_nix_queue_ctx()); this function re-reads them from
 * rvu->rvu_dbg, fetches each selected context via the NIX admin queue and
 * prints it with the ctype-specific dump routine.
 *
 * Returns 0 on success, -EINVAL on bad ctype/LF, uninitialized queue
 * contexts, or an AQ read failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Recover the selection made by the last write to this file. */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-ctype queue count, label and dump routine. */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps queues 0..qsize-1; otherwise just the one id. */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1936 
1937 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1938 			       int id, int ctype, char *ctype_string,
1939 			       struct seq_file *m)
1940 {
1941 	struct nix_hw *nix_hw = m->private;
1942 	struct rvu_pfvf *pfvf;
1943 	int max_id = 0;
1944 	u16 pcifunc;
1945 
1946 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1947 		return -EINVAL;
1948 
1949 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1950 
1951 	if (ctype == NIX_AQ_CTYPE_SQ) {
1952 		if (!pfvf->sq_ctx) {
1953 			dev_warn(rvu->dev, "SQ context is not initialized\n");
1954 			return -EINVAL;
1955 		}
1956 		max_id = pfvf->sq_ctx->qsize;
1957 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1958 		if (!pfvf->rq_ctx) {
1959 			dev_warn(rvu->dev, "RQ context is not initialized\n");
1960 			return -EINVAL;
1961 		}
1962 		max_id = pfvf->rq_ctx->qsize;
1963 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1964 		if (!pfvf->cq_ctx) {
1965 			dev_warn(rvu->dev, "CQ context is not initialized\n");
1966 			return -EINVAL;
1967 		}
1968 		max_id = pfvf->cq_ctx->qsize;
1969 	}
1970 
1971 	if (id < 0 || id >= max_id) {
1972 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1973 			 ctype_string, max_id - 1);
1974 		return -EINVAL;
1975 	}
1976 	switch (ctype) {
1977 	case NIX_AQ_CTYPE_CQ:
1978 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1979 		rvu->rvu_dbg.nix_cq_ctx.id = id;
1980 		rvu->rvu_dbg.nix_cq_ctx.all = all;
1981 		break;
1982 
1983 	case NIX_AQ_CTYPE_SQ:
1984 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1985 		rvu->rvu_dbg.nix_sq_ctx.id = id;
1986 		rvu->rvu_dbg.nix_sq_ctx.all = all;
1987 		break;
1988 
1989 	case NIX_AQ_CTYPE_RQ:
1990 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1991 		rvu->rvu_dbg.nix_rq_ctx.id = id;
1992 		rvu->rvu_dbg.nix_rq_ctx.all = all;
1993 		break;
1994 	default:
1995 		return -EINVAL;
1996 	}
1997 	return 0;
1998 }
1999 
2000 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2001 					   const char __user *buffer,
2002 					   size_t count, loff_t *ppos,
2003 					   int ctype)
2004 {
2005 	struct seq_file *m = filp->private_data;
2006 	struct nix_hw *nix_hw = m->private;
2007 	struct rvu *rvu = nix_hw->rvu;
2008 	char *cmd_buf, *ctype_string;
2009 	int nixlf, id = 0, ret;
2010 	bool all = false;
2011 
2012 	if ((*ppos != 0) || !count)
2013 		return -EINVAL;
2014 
2015 	switch (ctype) {
2016 	case NIX_AQ_CTYPE_SQ:
2017 		ctype_string = "sq";
2018 		break;
2019 	case NIX_AQ_CTYPE_RQ:
2020 		ctype_string = "rq";
2021 		break;
2022 	case NIX_AQ_CTYPE_CQ:
2023 		ctype_string = "cq";
2024 		break;
2025 	default:
2026 		return -EINVAL;
2027 	}
2028 
2029 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2030 
2031 	if (!cmd_buf)
2032 		return count;
2033 
2034 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2035 				   &nixlf, &id, &all);
2036 	if (ret < 0) {
2037 		dev_info(rvu->dev,
2038 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2039 			 ctype_string, ctype_string);
2040 		goto done;
2041 	} else {
2042 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2043 					  ctype_string, m);
2044 	}
2045 done:
2046 	kfree(cmd_buf);
2047 	return ret ? ret : count;
2048 }
2049 
/* debugfs "sq_ctx" write: select which NIX LF/SQ context(s) to dump. */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}

/* debugfs "sq_ctx" read: dump the previously selected SQ context(s). */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2064 
/* debugfs "rq_ctx" write: select which NIX LF/RQ context(s) to dump. */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}

/* debugfs "rq_ctx" read: dump the previously selected RQ context(s). */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2079 
/* debugfs "cq_ctx" write: select which NIX LF/CQ context(s) to dump. */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}

/* debugfs "cq_ctx" read: dump the previously selected CQ context(s). */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2094 
2095 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2096 				 unsigned long *bmap, char *qtype)
2097 {
2098 	char *buf;
2099 
2100 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2101 	if (!buf)
2102 		return;
2103 
2104 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2105 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2106 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2107 		   qtype, buf);
2108 	kfree(buf);
2109 }
2110 
2111 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2112 {
2113 	if (!pfvf->cq_ctx)
2114 		seq_puts(filp, "cq context is not initialized\n");
2115 	else
2116 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2117 				     "cq");
2118 
2119 	if (!pfvf->rq_ctx)
2120 		seq_puts(filp, "rq context is not initialized\n");
2121 	else
2122 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2123 				     "rq");
2124 
2125 	if (!pfvf->sq_ctx)
2126 		seq_puts(filp, "sq context is not initialized\n");
2127 	else
2128 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2129 				     "sq");
2130 }
2131 
/* debugfs "qsize" write for the NIX block: selects the LF to report on. */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs "qsize" read for the NIX block. */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2146 
/* Dump an ingress bandwidth-profile (policer) context word by word,
 * decoding the mode/color/action encodings into readable strings.
 */
static void print_band_prof_ctx(struct seq_file *m,
				struct nix_bandprof_s *prof)
{
	char *str;

	/* NOTE(review): no default case — str would be used uninitialized
	 * if pc_mode ever held a value outside these four encodings;
	 * presumably the field width makes that impossible, confirm
	 * against the hardware structure definition.
	 */
	switch (prof->pc_mode) {
	case NIX_RX_PC_MODE_VLAN:
		str = "VLAN";
		break;
	case NIX_RX_PC_MODE_DSCP:
		str = "DSCP";
		break;
	case NIX_RX_PC_MODE_GEN:
		str = "Generic";
		break;
	case NIX_RX_PC_MODE_RSVD:
		str = "Reserved";
		break;
	}
	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
	str = (prof->icolor == 3) ? "Color blind" :
		(prof->icolor == 0) ? "Green" :
		(prof->icolor == 1) ? "Yellow" : "Red";
	seq_printf(m, "W0: icolor\t\t%s\n", str);
	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);

	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
	str = (prof->lmode == 0) ? "byte" : "packet";
	seq_printf(m, "W1: lmode\t\t%s\n", str);
	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
	/* Green/yellow/red color actions share the same encoding. */
	str = (prof->gc_action == 0) ? "PASS" :
		(prof->gc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: gc_action\t\t%s\n", str);
	str = (prof->yc_action == 0) ? "PASS" :
		(prof->yc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: yc_action\t\t%s\n", str);
	str = (prof->rc_action == 0) ? "PASS" :
		(prof->rc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: rc_action\t\t%s\n", str);
	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);

	/* W2 onwards: accumulators and per-color pass/drop counters. */
	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
		   (u64)prof->green_pkt_pass);
	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
		   (u64)prof->yellow_pkt_pass);
	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
	seq_printf(m, "W7: green_octs_pass\t%lld\n",
		   (u64)prof->green_octs_pass);
	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
		   (u64)prof->yellow_octs_pass);
	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
		   (u64)prof->green_pkt_drop);
	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
		   (u64)prof->yellow_pkt_drop);
	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
	seq_printf(m, "W13: green_octs_drop\t%lld\n",
		   (u64)prof->green_octs_drop);
	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
		   (u64)prof->yellow_octs_drop);
	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
	seq_puts(m, "==============================\n");
}
2225 
/* debugfs "ingress_policer_ctx" read: walk every policer layer and dump
 * the context and owner (PF/VF) of each allocated bandwidth profile.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only dump profiles that are currently allocated. */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index: low 14 bits index, layer in
			 * bits 15:14.
			 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* FUNC bits clear => owned by a PF, else VF (FUNC-1). */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2282 
2283 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2284 {
2285 	struct nix_hw *nix_hw = m->private;
2286 	struct nix_ipolicer *ipolicer;
2287 	int layer;
2288 	char *str;
2289 
2290 	/* Ingress policers do not exist on all platforms */
2291 	if (!nix_hw->ipolicer)
2292 		return 0;
2293 
2294 	seq_puts(m, "\nBandwidth profile resource free count\n");
2295 	seq_puts(m, "=====================================\n");
2296 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2297 		if (layer == BAND_PROF_INVAL_LAYER)
2298 			continue;
2299 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2300 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2301 
2302 		ipolicer = &nix_hw->ipolicer[layer];
2303 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
2304 			   ipolicer->band_prof.max,
2305 			   rvu_rsrc_free_count(&ipolicer->band_prof));
2306 	}
2307 	seq_puts(m, "=====================================\n");
2308 
2309 	return 0;
2310 }
2311 
2312 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2313 
/* Create the debugfs directory and entry files for one NIX block
 * ("nix" for NIX0, "nix1" for NIX1). No-op if the block is not
 * implemented on this silicon.
 */
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
	struct nix_hw *nix_hw;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	if (blkaddr == BLKADDR_NIX0) {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[0];
	} else {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
						      rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[1];
	}

	/* Most entries take the per-block nix_hw as private data;
	 * "qsize" takes rvu itself.
	 */
	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_sq_ctx_fops);
	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_rq_ctx_fops);
	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_cq_ctx_fops);
	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_cache_fops);
	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_cache_fops);
	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
			    &rvu_dbg_nix_qsize_fops);
	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_ctx_fops);
	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_rsrc_fops);
}
2351 
2352 static void rvu_dbg_npa_init(struct rvu *rvu)
2353 {
2354 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2355 
2356 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2357 			    &rvu_dbg_npa_qsize_fops);
2358 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2359 			    &rvu_dbg_npa_aura_ctx_fops);
2360 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2361 			    &rvu_dbg_npa_pool_ctx_fops);
2362 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2363 			    &rvu_dbg_npa_ndc_cache_fops);
2364 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2365 			    &rvu_dbg_npa_ndc_hits_miss_fops);
2366 }
2367 
/* Fetch cumulative NIX RX stat @idx for the current cgxd/lmac_id, print
 * it when the read succeeds, and evaluate to the count. Sets the caller's
 * local 'err'; relies on 'rvu', 'cgxd', 'lmac_id' and 's' being in scope.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})

/* TX-direction counterpart of PRINT_CGX_CUML_NIXRX_STATUS. */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2387 
2388 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2389 {
2390 	struct cgx_link_user_info linfo;
2391 	struct mac_ops *mac_ops;
2392 	void *cgxd = s->private;
2393 	u64 ucast, mcast, bcast;
2394 	int stat = 0, err = 0;
2395 	u64 tx_stat, rx_stat;
2396 	struct rvu *rvu;
2397 
2398 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2399 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2400 	if (!rvu)
2401 		return -ENODEV;
2402 
2403 	mac_ops = get_mac_ops(cgxd);
2404 	/* There can be no CGX devices at all */
2405 	if (!mac_ops)
2406 		return 0;
2407 
2408 	/* Link status */
2409 	seq_puts(s, "\n=======Link Status======\n\n");
2410 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2411 	if (err)
2412 		seq_puts(s, "Failed to read link status\n");
2413 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
2414 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2415 
2416 	/* Rx stats */
2417 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2418 		   mac_ops->name);
2419 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2420 	if (err)
2421 		return err;
2422 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2423 	if (err)
2424 		return err;
2425 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2426 	if (err)
2427 		return err;
2428 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2429 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2430 	if (err)
2431 		return err;
2432 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2433 	if (err)
2434 		return err;
2435 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2436 	if (err)
2437 		return err;
2438 
2439 	/* Tx stats */
2440 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2441 		   mac_ops->name);
2442 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2443 	if (err)
2444 		return err;
2445 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2446 	if (err)
2447 		return err;
2448 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2449 	if (err)
2450 		return err;
2451 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2452 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2453 	if (err)
2454 		return err;
2455 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2456 	if (err)
2457 		return err;
2458 
2459 	/* Rx stats */
2460 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2461 	while (stat < mac_ops->rx_stats_cnt) {
2462 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2463 		if (err)
2464 			return err;
2465 		if (is_rvu_otx2(rvu))
2466 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2467 				   rx_stat);
2468 		else
2469 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2470 				   rx_stat);
2471 		stat++;
2472 	}
2473 
2474 	/* Tx stats */
2475 	stat = 0;
2476 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2477 	while (stat < mac_ops->tx_stats_cnt) {
2478 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2479 		if (err)
2480 			return err;
2481 
2482 		if (is_rvu_otx2(rvu))
2483 			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2484 				   tx_stat);
2485 		else
2486 			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2487 				   tx_stat);
2488 		stat++;
2489 	}
2490 
2491 	return err;
2492 }
2493 
2494 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2495 {
2496 	struct dentry *current_dir;
2497 	char *buf;
2498 
2499 	current_dir = filp->file->f_path.dentry->d_parent;
2500 	buf = strrchr(current_dir->d_name.name, 'c');
2501 	if (!buf)
2502 		return -EINVAL;
2503 
2504 	return kstrtoint(buf + 1, 10, lmac_id);
2505 }
2506 
/* debugfs "stats" read for an LMAC: resolve the LMAC id from the
 * directory name, then print its statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (!err)
		return cgx_print_stats(filp, lmac_id);

	return err;
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2519 
2520 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2521 {
2522 	struct pci_dev *pdev = NULL;
2523 	void *cgxd = s->private;
2524 	char *bcast, *mcast;
2525 	u16 index, domain;
2526 	u8 dmac[ETH_ALEN];
2527 	struct rvu *rvu;
2528 	u64 cfg, mac;
2529 	int pf;
2530 
2531 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2532 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2533 	if (!rvu)
2534 		return -ENODEV;
2535 
2536 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2537 	domain = 2;
2538 
2539 	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2540 	if (!pdev)
2541 		return 0;
2542 
2543 	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2544 	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2545 	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2546 
2547 	seq_puts(s,
2548 		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2549 	seq_printf(s, "%s  PF%d  %9s  %9s",
2550 		   dev_name(&pdev->dev), pf, bcast, mcast);
2551 	if (cfg & CGX_DMAC_CAM_ACCEPT)
2552 		seq_printf(s, "%12s\n\n", "UNICAST");
2553 	else
2554 		seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2555 
2556 	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2557 
2558 	for (index = 0 ; index < 32 ; index++) {
2559 		cfg = cgx_read_dmac_entry(cgxd, index);
2560 		/* Display enabled dmac entries associated with current lmac */
2561 		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2562 		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2563 			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2564 			u64_to_ether_addr(mac, dmac);
2565 			seq_printf(s, "%7d     %pM\n", index, dmac);
2566 		}
2567 	}
2568 
2569 	return 0;
2570 }
2571 
/* debugfs "mac_filter" read for an LMAC: resolve the LMAC id from the
 * directory name, then print its DMAC filter table.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int err, lmac_id;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (!err)
		return cgx_print_dmac_flt(filp, lmac_id);

	return err;
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2584 
/* Create the CGX/RPM debugfs hierarchy: one directory per CGX device
 * ("<mac>N"), one per LMAC beneath it ("lmacN"), each with "stats" and
 * "mac_filter" files whose private data is the owning cgx.
 */
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	unsigned long lmac_bmap;
	int i, lmac_id;
	char dname[20];
	void *cgx;

	if (!cgx_get_cgxcnt_max())
		return;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	if (!mac_ops)
		return;

	/* Root dir is named after the MAC type ("cgx" or "rpm"). */
	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
						   rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgx);
		/* cgx debugfs dir */
		sprintf(dname, "%s%d", mac_ops->name, i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);

		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
					    cgx, &rvu_dbg_cgx_stat_fops);
			debugfs_create_file("mac_filter", 0600,
					    rvu->rvu_dbg.lmac, cgx,
					    &rvu_dbg_cgx_dmac_flt_fops);
		}
	}
}
2627 
/* NPC debugfs APIs */

/* Print MCAM entry/counter allocation info for one PF or VF; silent
 * when the function has neither entries nor counters allocated.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
				    u16 pcifunc, int blkaddr)
{
	struct rvu *rvu = s->private;
	int entry_acnt, entry_ecnt;
	int cntr_acnt, cntr_ecnt;

	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
					  &entry_acnt, &entry_ecnt);
	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
					    &cntr_acnt, &cntr_ecnt);
	if (!entry_acnt && !cntr_acnt)
		return;

	/* FUNC bits clear => the PF itself, else a VF (FUNC-1). */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
			   rvu_get_pf(pcifunc));
	else
		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
			   rvu_get_pf(pcifunc),
			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);

	if (entry_acnt) {
		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
	}
	if (cntr_acnt) {
		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
	}
}
2660 
/* debugfs "mcam_info" read: print MCAM key widths, global entry/counter
 * totals, and the per-PF/VF allocation breakdown (under mcam->lock).
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* Nothing allocated yet: skip the per-function breakdown. */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* NOTE(review): bits 19:12 of RVU_PRIV_PFX_CFG hold the
		 * PF's VF count per this extraction — confirm against
		 * the register spec.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2727 
2728 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2729 					     void *unused)
2730 {
2731 	struct rvu *rvu = filp->private;
2732 	struct npc_mcam *mcam;
2733 	int blkaddr;
2734 
2735 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2736 	if (blkaddr < 0)
2737 		return -ENODEV;
2738 
2739 	mcam = &rvu->hw->mcam;
2740 
2741 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2742 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2743 		   rvu_read64(rvu, blkaddr,
2744 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2745 
2746 	return 0;
2747 }
2748 
2749 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2750 
/* Print every match field set in @rule->features together with its
 * packet value and mask, one field per line.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	/* rule->features is a 64-bit bitmap of NPC_* match field ids */
	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			/* multi-byte header fields are kept in network
			 * byte order, hence the ntohs() on display
			 */
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* field has no dedicated formatter; name only */
			seq_puts(s, "\n");
			break;
		}
	}
}
2814 
/* Decode and print the NIX action programmed for @rule, using the TX
 * or RX action op depending on the rule's interface. Unrecognized ops
 * print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
2862 
/* Map an NPC interface id to a printable name ("unknown" otherwise). */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2880 
/* List every software-tracked MCAM rule: installing PF/VF, direction,
 * interface, entry index, match fields, RX forward target, action,
 * enable state and hit counter (when one is attached).
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;	/* no NPC block: show an empty file */

	mcam = &rvu->hw->mcam;

	/* Hold the MCAM lock so the rule list is stable while dumping */
	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* non-zero FUNC bits => owner is VF (func - 1) of this PF */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules additionally carry a forward target + channel */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2948 
/* Dump the exact-match MEM (hashed, multi-way) table followed by the
 * CAM (overflow) table, for the "exact_entries" debugfs file.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	/* bit j set => way j has an entry at the depth index being printed */
	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	for (i = 0; i < table->mem_table.ways; i++) {
		/* mem_entry[i] is a per-way cursor over that way's entry
		 * list; the walk below relies on the list being ordered
		 * by index (NOTE(review): confirm insertion keeps order)
		 */
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		bitmap = 0;
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* advance this way's cursor past the printed entry */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3035 
3036 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3037 {
3038 	struct npc_exact_table *table;
3039 	struct rvu *rvu = s->private;
3040 	int i;
3041 
3042 	table = rvu->hw->table;
3043 
3044 	seq_puts(s, "\n\tExact Table Info\n");
3045 	seq_printf(s, "Exact Match Feature : %s\n",
3046 		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
3047 	if (!rvu->hw->cap.npc_exact_match_enabled)
3048 		return 0;
3049 
3050 	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3051 	for (i = 0; i < table->num_drop_rules; i++)
3052 		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3053 
3054 	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3055 	for (i = 0; i < table->num_drop_rules; i++)
3056 		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3057 
3058 	seq_puts(s, "\n\tMEM Table Info\n");
3059 	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3060 	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3061 	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3062 	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3063 	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3064 
3065 	seq_puts(s, "\n\tCAM Table Info\n");
3066 	seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3067 
3068 	return 0;
3069 }
3070 
3071 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3072 
3073 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3074 {
3075 	struct npc_exact_table *table;
3076 	struct rvu *rvu = s->private;
3077 	struct npc_key_field *field;
3078 	u16 chan, pcifunc;
3079 	int blkaddr, i;
3080 	u64 cfg, cam1;
3081 	char *str;
3082 
3083 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3084 	table = rvu->hw->table;
3085 
3086 	field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3087 
3088 	seq_puts(s, "\n\t Exact Hit on drop status\n");
3089 	seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3090 
3091 	for (i = 0; i < table->num_drop_rules; i++) {
3092 		pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3093 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3094 
3095 		/* channel will be always in keyword 0 */
3096 		cam1 = rvu_read64(rvu, blkaddr,
3097 				  NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3098 		chan = field->kw_mask[0] & cam1;
3099 
3100 		str = (cfg & 1) ? "enabled" : "disabled";
3101 
3102 		seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3103 			   rvu_read64(rvu, blkaddr,
3104 				      NPC_AF_MATCH_STATX(table->counter_idx[i])),
3105 			   chan, str);
3106 	}
3107 
3108 	return 0;
3109 }
3110 
3111 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3112 
3113 static void rvu_dbg_npc_init(struct rvu *rvu)
3114 {
3115 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3116 
3117 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3118 			    &rvu_dbg_npc_mcam_info_fops);
3119 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3120 			    &rvu_dbg_npc_mcam_rules_fops);
3121 
3122 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3123 			    &rvu_dbg_npc_rx_miss_act_fops);
3124 
3125 	if (!rvu->hw->cap.npc_exact_match_enabled)
3126 		return;
3127 
3128 	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3129 			    &rvu_dbg_npc_exact_entries_fops);
3130 
3131 	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3132 			    &rvu_dbg_npc_exact_info_fops);
3133 
3134 	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3135 			    &rvu_dbg_npc_exact_drop_cnt_fops);
3136 
3137 }
3138 
/* Print FREE/BUSY status bitmaps for all CPT engines of @eng_type.
 * Bit i of each bitmap corresponds to engine (e_min + i).
 * NOTE(review): bitmaps are u64, so this assumes at most 64 engines of
 * any one type - confirm against CPT_AF_CONSTANTS1 hardware limits.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	/* CPT_AF_CONSTANTS1 packs engine counts: SE[15:0], IE[31:16],
	 * AE[47:32]
	 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Engines are numbered contiguously: SEs first, then IEs, then AEs */
	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)	/* STS bit 0 -> busy */
			busy_sts |= 1ULL << i;

		if (reg & 0x2)	/* STS bit 1 -> free */
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
3184 
/* debugfs "cpt_ae_sts": status bitmaps for AE-type CPT engines */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3191 
/* debugfs "cpt_se_sts": status bitmaps for SE-type CPT engines */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3198 
/* debugfs "cpt_ie_sts": status bitmaps for IE-type CPT engines */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3205 
3206 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3207 {
3208 	struct cpt_ctx *ctx = filp->private;
3209 	u16 max_ses, max_ies, max_aes;
3210 	struct rvu *rvu = ctx->rvu;
3211 	int blkaddr = ctx->blkaddr;
3212 	u32 e_max, e;
3213 	u64 reg;
3214 
3215 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3216 	max_ses = reg & 0xffff;
3217 	max_ies = (reg >> 16) & 0xffff;
3218 	max_aes = (reg >> 32) & 0xffff;
3219 
3220 	e_max = max_ses + max_ies + max_aes;
3221 
3222 	seq_puts(filp, "===========================================\n");
3223 	for (e = 0; e < e_max; e++) {
3224 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3225 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
3226 			   reg & 0xff);
3227 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3228 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
3229 			   reg);
3230 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3231 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
3232 			   reg);
3233 		seq_puts(filp, "===========================================\n");
3234 	}
3235 	return 0;
3236 }
3237 
3238 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3239 
3240 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3241 {
3242 	struct cpt_ctx *ctx = filp->private;
3243 	int blkaddr = ctx->blkaddr;
3244 	struct rvu *rvu = ctx->rvu;
3245 	struct rvu_block *block;
3246 	struct rvu_hwinfo *hw;
3247 	u64 reg;
3248 	u32 lf;
3249 
3250 	hw = rvu->hw;
3251 	block = &hw->block[blkaddr];
3252 	if (!block->lf.bmap)
3253 		return -ENODEV;
3254 
3255 	seq_puts(filp, "===========================================\n");
3256 	for (lf = 0; lf < block->lf.max; lf++) {
3257 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3258 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
3259 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3260 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
3261 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3262 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
3263 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3264 				(lf << block->lfshift));
3265 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
3266 		seq_puts(filp, "===========================================\n");
3267 	}
3268 	return 0;
3269 }
3270 
3271 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3272 
/* Dump a fixed set of CPT AF interrupt/error status registers for the
 * "cpt_err_info" debugfs file.
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3299 
/* Dump CPT performance counters (instruction/read request counts,
 * latencies, active cycles, clock count) for the "cpt_pc" debugfs file.
 */
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc         %llu\n", reg);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3326 
3327 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3328 {
3329 	struct cpt_ctx *ctx;
3330 
3331 	if (!is_block_implemented(rvu->hw, blkaddr))
3332 		return;
3333 
3334 	if (blkaddr == BLKADDR_CPT0) {
3335 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3336 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
3337 		ctx->blkaddr = BLKADDR_CPT0;
3338 		ctx->rvu = rvu;
3339 	} else {
3340 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3341 						      rvu->rvu_dbg.root);
3342 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
3343 		ctx->blkaddr = BLKADDR_CPT1;
3344 		ctx->rvu = rvu;
3345 	}
3346 
3347 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3348 			    &rvu_dbg_cpt_pc_fops);
3349 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3350 			    &rvu_dbg_cpt_ae_sts_fops);
3351 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3352 			    &rvu_dbg_cpt_se_sts_fops);
3353 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3354 			    &rvu_dbg_cpt_ie_sts_fops);
3355 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3356 			    &rvu_dbg_cpt_engines_info_fops);
3357 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3358 			    &rvu_dbg_cpt_lfs_info_fops);
3359 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3360 			    &rvu_dbg_cpt_err_info_fops);
3361 }
3362 
3363 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
3364 {
3365 	if (!is_rvu_otx2(rvu))
3366 		return "cn10k";
3367 	else
3368 		return "octeontx2";
3369 }
3370 
3371 void rvu_dbg_init(struct rvu *rvu)
3372 {
3373 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3374 
3375 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3376 			    &rvu_dbg_rsrc_status_fops);
3377 
3378 	if (!is_rvu_otx2(rvu))
3379 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3380 				    rvu, &rvu_dbg_lmtst_map_table_fops);
3381 
3382 	if (!cgx_get_cgxcnt_max())
3383 		goto create;
3384 
3385 	if (is_rvu_otx2(rvu))
3386 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3387 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3388 	else
3389 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3390 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3391 
3392 create:
3393 	rvu_dbg_npa_init(rvu);
3394 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3395 
3396 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3397 	rvu_dbg_cgx_init(rvu);
3398 	rvu_dbg_npc_init(rvu);
3399 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3400 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3401 	rvu_dbg_mcs_init(rvu);
3402 }
3403 
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3408 
3409 #endif /* CONFIG_DEBUG_FS */
3410