1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 
22 #define DEBUGFS_DIR_NAME "octeontx2"
23 
/* Generic indices into the CGX statistics name tables below; the same
 * CGX_STATx enumerators index both cgx_rx_stats_fields and
 * cgx_tx_stats_fields (RX uses 0-12, TX uses 0-17).
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
45 
/* NIX TX stats: per-LF transmit statistic register indices */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,	/* count of TX stats, keep last */
};
55 
/* NIX RX stats: per-LF receive statistic register indices */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,	/* count of RX stats, keep last */
};
72 
/* Human-readable names for the CGX RX statistics, indexed by CGX_STATx.
 * Only CGX_STAT0..CGX_STAT12 are defined for the RX direction.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
88 
/* Human-readable names for the CGX TX statistics, indexed by CGX_STATx.
 * Only CGX_STAT0..CGX_STAT17 are defined for the TX direction.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
109 
/* Human-readable names for the RPM (CN10K MAC) RX statistics, in
 * hardware register order. Fixed typos in two user-visible strings
 * ("with out error" -> "without error", "a1nrange" -> "in-range").
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
155 
/* Human-readable names for the RPM (CN10K MAC) TX statistics, in
 * hardware register order.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface",	/* trailing comma added for consistency */
};
192 
/* CPT engine group types as encoded in hardware (AE/SE/IE) */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
198 
/* Number of NDC banks, read from the NDC block's constants register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Placeholders so the FOPS-declaring macros below can be handed "NULL"
 * as a read/write op and still token-paste to a valid identifier.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare a seq_file based file_operations: open via single_open() with
 * rvu_dbg_<read_op> as the show callback, optional rvu_dbg_<write_op>.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare a raw (non-seq_file) file_operations with simple_open() and
 * rvu_dbg_<read_op>/rvu_dbg_<write_op> handlers.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

/* Defined later in this file; needed by the generic qsize handlers */
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);

/* Each LMTST map table entry is 16 bytes: two 64-bit words */
#define LMT_MAPTBL_ENTRY_SIZE 16
230 /* Dump LMTST map table */
231 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
232 					       char __user *buffer,
233 					       size_t count, loff_t *ppos)
234 {
235 	struct rvu *rvu = filp->private_data;
236 	u64 lmt_addr, val, tbl_base;
237 	int pf, vf, num_vfs, hw_vfs;
238 	void __iomem *lmt_map_base;
239 	int index = 0, off = 0;
240 	int bytes_not_copied;
241 	int buf_size = 10240;
242 	char *buf;
243 
244 	/* don't allow partial reads */
245 	if (*ppos != 0)
246 		return 0;
247 
248 	buf = kzalloc(buf_size, GFP_KERNEL);
249 	if (!buf)
250 		return -ENOSPC;
251 
252 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
253 
254 	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
255 	if (!lmt_map_base) {
256 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
257 		kfree(buf);
258 		return false;
259 	}
260 
261 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
262 			  "\n\t\t\t\t\tLmtst Map Table Entries");
263 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
264 			  "\n\t\t\t\t\t=======================");
265 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
266 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
267 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
268 			  "Lmtline Base (word 0)\t\t");
269 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
270 			  "Lmt Map Entry (word 1)");
271 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
272 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
273 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
274 				    pf);
275 
276 		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
277 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
278 				 (tbl_base + index));
279 		lmt_addr = readq(lmt_map_base + index);
280 		off += scnprintf(&buf[off], buf_size - 1 - off,
281 				 " 0x%016llx\t\t", lmt_addr);
282 		index += 8;
283 		val = readq(lmt_map_base + index);
284 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
285 				 val);
286 		/* Reading num of VFs per PF */
287 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
288 		for (vf = 0; vf < num_vfs; vf++) {
289 			index = (pf * rvu->hw->total_vfs * 16) +
290 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
291 			off += scnprintf(&buf[off], buf_size - 1 - off,
292 					    "PF%d:VF%d  \t\t", pf, vf);
293 			off += scnprintf(&buf[off], buf_size - 1 - off,
294 					 " 0x%llx\t\t", (tbl_base + index));
295 			lmt_addr = readq(lmt_map_base + index);
296 			off += scnprintf(&buf[off], buf_size - 1 - off,
297 					 " 0x%016llx\t\t", lmt_addr);
298 			index += 8;
299 			val = readq(lmt_map_base + index);
300 			off += scnprintf(&buf[off], buf_size - 1 - off,
301 					 " 0x%016llx\n", val);
302 		}
303 	}
304 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
305 
306 	bytes_not_copied = copy_to_user(buffer, buf, off);
307 	kfree(buf);
308 
309 	iounmap(lmt_map_base);
310 	if (bytes_not_copied)
311 		return -EFAULT;
312 
313 	*ppos = off;
314 	return off;
315 }
316 
317 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
318 
/* Build into @lfs a human-readable, comma-separated list of the LFs in
 * @block that are attached to @pcifunc, compressing consecutive runs
 * into "first-last" form (e.g. "0-3,7,9-10").
 *
 * @block:   RVU block (passed by value) whose LF bitmap/fn_map is scanned
 * @pcifunc: PF_FUNC whose LFs are listed
 * @lfs:     caller-provided output buffer, always NUL-terminated on return
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts at lf.max (an impossible LF) so the first match
	 * can never be mistaken for the continuation of a run.
	 */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		/* Skip LFs attached to some other PF_FUNC */
		if (block.fn_map[lf] != pcifunc)
			continue;

		/* Consecutive LF: extend the current run, print later */
		if (lf == prev_lf + 1) {
			prev_lf = lf;
			seq = 1;
			continue;
		}

		/* Run broken: flush the pending run end, then this LF */
		if (seq)
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* A run still open at the end needs its "-last" suffix */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
352 
353 static int get_max_column_width(struct rvu *rvu)
354 {
355 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
356 	struct rvu_block block;
357 	u16 pcifunc;
358 	char *buf;
359 
360 	buf = kzalloc(buf_size, GFP_KERNEL);
361 	if (!buf)
362 		return -ENOMEM;
363 
364 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
365 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
366 			pcifunc = pf << 10 | vf;
367 			if (!pcifunc)
368 				continue;
369 
370 			for (index = 0; index < BLK_COUNT; index++) {
371 				block = rvu->hw->block[index];
372 				if (!strlen(block.name))
373 					continue;
374 
375 				get_lf_str_list(block, pcifunc, buf);
376 				if (lf_str_size <= strlen(buf))
377 					lf_str_size = strlen(buf) + 1;
378 			}
379 		}
380 	}
381 
382 	kfree(buf);
383 	return lf_str_size;
384 }
385 
386 /* Dumps current provisioning status of all RVU block LFs */
387 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
388 					  char __user *buffer,
389 					  size_t count, loff_t *ppos)
390 {
391 	int index, off = 0, flag = 0, len = 0, i = 0;
392 	struct rvu *rvu = filp->private_data;
393 	int bytes_not_copied = 0;
394 	struct rvu_block block;
395 	int pf, vf, pcifunc;
396 	int buf_size = 2048;
397 	int lf_str_size;
398 	char *lfs;
399 	char *buf;
400 
401 	/* don't allow partial reads */
402 	if (*ppos != 0)
403 		return 0;
404 
405 	buf = kzalloc(buf_size, GFP_KERNEL);
406 	if (!buf)
407 		return -ENOSPC;
408 
409 	/* Get the maximum width of a column */
410 	lf_str_size = get_max_column_width(rvu);
411 
412 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
413 	if (!lfs) {
414 		kfree(buf);
415 		return -ENOMEM;
416 	}
417 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
418 			  "pcifunc");
419 	for (index = 0; index < BLK_COUNT; index++)
420 		if (strlen(rvu->hw->block[index].name)) {
421 			off += scnprintf(&buf[off], buf_size - 1 - off,
422 					 "%-*s", lf_str_size,
423 					 rvu->hw->block[index].name);
424 		}
425 
426 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
427 	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
428 	if (bytes_not_copied)
429 		goto out;
430 
431 	i++;
432 	*ppos += off;
433 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
434 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
435 			off = 0;
436 			flag = 0;
437 			pcifunc = pf << 10 | vf;
438 			if (!pcifunc)
439 				continue;
440 
441 			if (vf) {
442 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
443 				off = scnprintf(&buf[off],
444 						buf_size - 1 - off,
445 						"%-*s", lf_str_size, lfs);
446 			} else {
447 				sprintf(lfs, "PF%d", pf);
448 				off = scnprintf(&buf[off],
449 						buf_size - 1 - off,
450 						"%-*s", lf_str_size, lfs);
451 			}
452 
453 			for (index = 0; index < BLK_COUNT; index++) {
454 				block = rvu->hw->block[index];
455 				if (!strlen(block.name))
456 					continue;
457 				len = 0;
458 				lfs[len] = '\0';
459 				get_lf_str_list(block, pcifunc, lfs);
460 				if (strlen(lfs))
461 					flag = 1;
462 
463 				off += scnprintf(&buf[off], buf_size - 1 - off,
464 						 "%-*s", lf_str_size, lfs);
465 			}
466 			if (flag) {
467 				off +=	scnprintf(&buf[off],
468 						  buf_size - 1 - off, "\n");
469 				bytes_not_copied = copy_to_user(buffer +
470 								(i * off),
471 								buf, off);
472 				if (bytes_not_copied)
473 					goto out;
474 
475 				i++;
476 				*ppos += off;
477 			}
478 		}
479 	}
480 
481 out:
482 	kfree(lfs);
483 	kfree(buf);
484 	if (bytes_not_copied)
485 		return -EFAULT;
486 
487 	return *ppos;
488 }
489 
490 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
491 
492 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
493 {
494 	struct rvu *rvu = filp->private;
495 	struct pci_dev *pdev = NULL;
496 	struct mac_ops *mac_ops;
497 	char cgx[10], lmac[10];
498 	struct rvu_pfvf *pfvf;
499 	int pf, domain, blkid;
500 	u8 cgx_id, lmac_id;
501 	u16 pcifunc;
502 
503 	domain = 2;
504 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
505 	/* There can be no CGX devices at all */
506 	if (!mac_ops)
507 		return 0;
508 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
509 		   mac_ops->name);
510 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
511 		if (!is_pf_cgxmapped(rvu, pf))
512 			continue;
513 
514 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
515 		if (!pdev)
516 			continue;
517 
518 		cgx[0] = 0;
519 		lmac[0] = 0;
520 		pcifunc = pf << 10;
521 		pfvf = rvu_get_pfvf(rvu, pcifunc);
522 
523 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
524 			blkid = 0;
525 		else
526 			blkid = 1;
527 
528 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
529 				    &lmac_id);
530 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
531 		sprintf(lmac, "LMAC%d", lmac_id);
532 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
533 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
534 	}
535 	return 0;
536 }
537 
538 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
539 
540 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
541 				u16 *pcifunc)
542 {
543 	struct rvu_block *block;
544 	struct rvu_hwinfo *hw;
545 
546 	hw = rvu->hw;
547 	block = &hw->block[blkaddr];
548 
549 	if (lf < 0 || lf >= block->lf.max) {
550 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
551 			 block->lf.max - 1);
552 		return false;
553 	}
554 
555 	*pcifunc = block->fn_map[lf];
556 	if (!*pcifunc) {
557 		dev_warn(rvu->dev,
558 			 "This LF is not attached to any RVU PFFUNC\n");
559 		return false;
560 	}
561 	return true;
562 }
563 
564 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
565 {
566 	char *buf;
567 
568 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
569 	if (!buf)
570 		return;
571 
572 	if (!pfvf->aura_ctx) {
573 		seq_puts(m, "Aura context is not initialized\n");
574 	} else {
575 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
576 					pfvf->aura_ctx->qsize);
577 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
578 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
579 	}
580 
581 	if (!pfvf->pool_ctx) {
582 		seq_puts(m, "Pool context is not initialized\n");
583 	} else {
584 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
585 					pfvf->pool_ctx->qsize);
586 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
587 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
588 	}
589 	kfree(buf);
590 }
591 
592 /* The 'qsize' entry dumps current Aura/Pool context Qsize
593  * and each context's current enable/disable status in a bitmap.
594  */
595 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
596 				 int blktype)
597 {
598 	void (*print_qsize)(struct seq_file *filp,
599 			    struct rvu_pfvf *pfvf) = NULL;
600 	struct dentry *current_dir;
601 	struct rvu_pfvf *pfvf;
602 	struct rvu *rvu;
603 	int qsize_id;
604 	u16 pcifunc;
605 	int blkaddr;
606 
607 	rvu = filp->private;
608 	switch (blktype) {
609 	case BLKTYPE_NPA:
610 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
611 		print_qsize = print_npa_qsize;
612 		break;
613 
614 	case BLKTYPE_NIX:
615 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
616 		print_qsize = print_nix_qsize;
617 		break;
618 
619 	default:
620 		return -EINVAL;
621 	}
622 
623 	if (blktype == BLKTYPE_NPA) {
624 		blkaddr = BLKADDR_NPA;
625 	} else {
626 		current_dir = filp->file->f_path.dentry->d_parent;
627 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
628 				   BLKADDR_NIX1 : BLKADDR_NIX0);
629 	}
630 
631 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
632 		return -EINVAL;
633 
634 	pfvf = rvu_get_pfvf(rvu, pcifunc);
635 	print_qsize(filp, pfvf);
636 
637 	return 0;
638 }
639 
640 static ssize_t rvu_dbg_qsize_write(struct file *filp,
641 				   const char __user *buffer, size_t count,
642 				   loff_t *ppos, int blktype)
643 {
644 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
645 	struct seq_file *seqfile = filp->private_data;
646 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
647 	struct rvu *rvu = seqfile->private;
648 	struct dentry *current_dir;
649 	int blkaddr;
650 	u16 pcifunc;
651 	int ret, lf;
652 
653 	cmd_buf = memdup_user(buffer, count + 1);
654 	if (IS_ERR(cmd_buf))
655 		return -ENOMEM;
656 
657 	cmd_buf[count] = '\0';
658 
659 	cmd_buf_tmp = strchr(cmd_buf, '\n');
660 	if (cmd_buf_tmp) {
661 		*cmd_buf_tmp = '\0';
662 		count = cmd_buf_tmp - cmd_buf + 1;
663 	}
664 
665 	cmd_buf_tmp = cmd_buf;
666 	subtoken = strsep(&cmd_buf, " ");
667 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
668 	if (cmd_buf)
669 		ret = -EINVAL;
670 
671 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
672 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
673 		goto qsize_write_done;
674 	}
675 
676 	if (blktype == BLKTYPE_NPA) {
677 		blkaddr = BLKADDR_NPA;
678 	} else {
679 		current_dir = filp->f_path.dentry->d_parent;
680 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
681 				   BLKADDR_NIX1 : BLKADDR_NIX0);
682 	}
683 
684 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
685 		ret = -EINVAL;
686 		goto qsize_write_done;
687 	}
688 	if (blktype  == BLKTYPE_NPA)
689 		rvu->rvu_dbg.npa_qsize_id = lf;
690 	else
691 		rvu->rvu_dbg.nix_qsize_id = lf;
692 
693 qsize_write_done:
694 	kfree(cmd_buf_tmp);
695 	return ret ? ret : count;
696 }
697 
698 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
699 				       const char __user *buffer,
700 				       size_t count, loff_t *ppos)
701 {
702 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
703 					    BLKTYPE_NPA);
704 }
705 
706 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
707 {
708 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
709 }
710 
711 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
712 
/* Dumps given NPA Aura's context.
 * Prints each field of the aura context grouped by its 64-bit word
 * (W0..W6), matching the hardware context layout. Fields guarded by
 * !is_rvu_otx2() exist only on silicon newer than OcteonTx2.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is not present on OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is not present on OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
760 
/* Dumps given NPA Pool's context.
 * Prints each field of the pool context grouped by its 64-bit word
 * (W0..W8), matching the hardware context layout. Fields guarded by
 * !is_rvu_otx2() exist only on silicon newer than OcteonTx2.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is not present on OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is not present on OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
807 
/* Reads aura/pool's ctx from admin queue.
 * Uses the LF/id/"all" selection last written via the corresponding
 * debugfs write handler (stored in rvu->rvu_dbg), issues an NPA AQ READ
 * per context, and prints each result with the matching dump helper.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the selection saved by the matching _write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build the AQ read request once; only aura_id varies per loop */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every context [0, max_id); otherwise just [id, id+1) */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
888 
889 static int write_npa_ctx(struct rvu *rvu, bool all,
890 			 int npalf, int id, int ctype)
891 {
892 	struct rvu_pfvf *pfvf;
893 	int max_id = 0;
894 	u16 pcifunc;
895 
896 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
897 		return -EINVAL;
898 
899 	pfvf = rvu_get_pfvf(rvu, pcifunc);
900 
901 	if (ctype == NPA_AQ_CTYPE_AURA) {
902 		if (!pfvf->aura_ctx) {
903 			dev_warn(rvu->dev, "Aura context is not initialized\n");
904 			return -EINVAL;
905 		}
906 		max_id = pfvf->aura_ctx->qsize;
907 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
908 		if (!pfvf->pool_ctx) {
909 			dev_warn(rvu->dev, "Pool context is not initialized\n");
910 			return -EINVAL;
911 		}
912 		max_id = pfvf->pool_ctx->qsize;
913 	}
914 
915 	if (id < 0 || id >= max_id) {
916 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
917 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
918 			max_id - 1);
919 		return -EINVAL;
920 	}
921 
922 	switch (ctype) {
923 	case NPA_AQ_CTYPE_AURA:
924 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
925 		rvu->rvu_dbg.npa_aura_ctx.id = id;
926 		rvu->rvu_dbg.npa_aura_ctx.all = all;
927 		break;
928 
929 	case NPA_AQ_CTYPE_POOL:
930 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
931 		rvu->rvu_dbg.npa_pool_ctx.id = id;
932 		rvu->rvu_dbg.npa_pool_ctx.all = all;
933 		break;
934 	default:
935 		return -EINVAL;
936 	}
937 	return 0;
938 }
939 
940 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
941 				const char __user *buffer, int *npalf,
942 				int *id, bool *all)
943 {
944 	int bytes_not_copied;
945 	char *cmd_buf_tmp;
946 	char *subtoken;
947 	int ret;
948 
949 	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
950 	if (bytes_not_copied)
951 		return -EFAULT;
952 
953 	cmd_buf[*count] = '\0';
954 	cmd_buf_tmp = strchr(cmd_buf, '\n');
955 
956 	if (cmd_buf_tmp) {
957 		*cmd_buf_tmp = '\0';
958 		*count = cmd_buf_tmp - cmd_buf + 1;
959 	}
960 
961 	subtoken = strsep(&cmd_buf, " ");
962 	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
963 	if (ret < 0)
964 		return ret;
965 	subtoken = strsep(&cmd_buf, " ");
966 	if (subtoken && strcmp(subtoken, "all") == 0) {
967 		*all = true;
968 	} else {
969 		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
970 		if (ret < 0)
971 			return ret;
972 	}
973 	if (cmd_buf)
974 		return -EINVAL;
975 	return ret;
976 }
977 
978 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
979 				     const char __user *buffer,
980 				     size_t count, loff_t *ppos, int ctype)
981 {
982 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
983 					"aura" : "pool";
984 	struct seq_file *seqfp = filp->private_data;
985 	struct rvu *rvu = seqfp->private;
986 	int npalf, id = 0, ret;
987 	bool all = false;
988 
989 	if ((*ppos != 0) || !count)
990 		return -EINVAL;
991 
992 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
993 	if (!cmd_buf)
994 		return count;
995 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
996 				   &npalf, &id, &all);
997 	if (ret < 0) {
998 		dev_info(rvu->dev,
999 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1000 			 ctype_string, ctype_string);
1001 		goto done;
1002 	} else {
1003 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1004 	}
1005 done:
1006 	kfree(cmd_buf);
1007 	return ret ? ret : count;
1008 }
1009 
1010 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1011 					  const char __user *buffer,
1012 					  size_t count, loff_t *ppos)
1013 {
1014 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1015 				     NPA_AQ_CTYPE_AURA);
1016 }
1017 
/* seq_file show handler: dump the NPA aura context(s) previously selected
 * via the npa_aura_ctx write handler.
 */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1024 
1025 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1026 					  const char __user *buffer,
1027 					  size_t count, loff_t *ppos)
1028 {
1029 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1030 				     NPA_AQ_CTYPE_POOL);
1031 }
1032 
/* seq_file show handler: dump the NPA pool context(s) previously selected
 * via the npa_pool_ctx write handler.
 */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1039 
1040 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1041 			    int ctype, int transaction)
1042 {
1043 	u64 req, out_req, lat, cant_alloc;
1044 	struct nix_hw *nix_hw;
1045 	struct rvu *rvu;
1046 	int port;
1047 
1048 	if (blk_addr == BLKADDR_NDC_NPA0) {
1049 		rvu = s->private;
1050 	} else {
1051 		nix_hw = s->private;
1052 		rvu = nix_hw->rvu;
1053 	}
1054 
1055 	for (port = 0; port < NDC_MAX_PORT; port++) {
1056 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1057 						(port, ctype, transaction));
1058 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1059 						(port, ctype, transaction));
1060 		out_req = rvu_read64(rvu, blk_addr,
1061 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1062 				     (port, ctype, transaction));
1063 		cant_alloc = rvu_read64(rvu, blk_addr,
1064 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1065 					(port, transaction));
1066 		seq_printf(s, "\nPort:%d\n", port);
1067 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1068 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1069 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1070 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1071 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1072 	}
1073 }
1074 
1075 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1076 {
1077 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1078 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1079 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1080 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1081 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1082 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1083 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1084 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1085 	return 0;
1086 }
1087 
/* seq_file show handler: NPA NDC cache/bypass read & write counters */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1094 
1095 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1096 {
1097 	struct nix_hw *nix_hw;
1098 	struct rvu *rvu;
1099 	int bank, max_bank;
1100 
1101 	if (blk_addr == BLKADDR_NDC_NPA0) {
1102 		rvu = s->private;
1103 	} else {
1104 		nix_hw = s->private;
1105 		rvu = nix_hw->rvu;
1106 	}
1107 
1108 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
1109 	for (bank = 0; bank < max_bank; bank++) {
1110 		seq_printf(s, "BANK:%d\n", bank);
1111 		seq_printf(s, "\tHits:\t%lld\n",
1112 			   (u64)rvu_read64(rvu, blk_addr,
1113 			   NDC_AF_BANKX_HIT_PC(bank)));
1114 		seq_printf(s, "\tMiss:\t%lld\n",
1115 			   (u64)rvu_read64(rvu, blk_addr,
1116 			    NDC_AF_BANKX_MISS_PC(bank)));
1117 	}
1118 	return 0;
1119 }
1120 
1121 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1122 {
1123 	struct nix_hw *nix_hw = filp->private;
1124 	int blkaddr = 0;
1125 	int ndc_idx = 0;
1126 
1127 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1128 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1129 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1130 
1131 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1132 }
1133 
1134 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1135 
1136 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1137 {
1138 	struct nix_hw *nix_hw = filp->private;
1139 	int blkaddr = 0;
1140 	int ndc_idx = 0;
1141 
1142 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1143 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1144 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1145 
1146 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1147 }
1148 
1149 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1150 
/* seq_file show handler: per-bank hit/miss counters of the NPA NDC */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1158 
1159 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1160 						void *unused)
1161 {
1162 	struct nix_hw *nix_hw = filp->private;
1163 	int ndc_idx = NPA0_U;
1164 	int blkaddr = 0;
1165 
1166 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1167 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1168 
1169 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1170 }
1171 
1172 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1173 
1174 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1175 						void *unused)
1176 {
1177 	struct nix_hw *nix_hw = filp->private;
1178 	int ndc_idx = NPA0_U;
1179 	int blkaddr = 0;
1180 
1181 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1182 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1183 
1184 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1185 }
1186 
1187 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1188 
1189 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1190 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1191 {
1192 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1193 		   sq_ctx->ena, sq_ctx->qint_idx);
1194 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1195 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1196 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1197 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1198 
1199 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1200 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1201 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1202 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1203 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1204 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1205 
1206 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1207 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1208 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1209 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1210 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1211 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1212 
1213 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1214 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1215 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1216 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1217 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1218 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1219 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1220 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1221 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1222 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1223 
1224 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1225 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1226 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1227 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1228 		   sq_ctx->smenq_next_sqb);
1229 
1230 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1231 
1232 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1233 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1234 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1235 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1236 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1237 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1238 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1239 
1240 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1241 		   (u64)sq_ctx->scm_lso_rem);
1242 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1243 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1244 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1245 		   (u64)sq_ctx->dropped_octs);
1246 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1247 		   (u64)sq_ctx->dropped_pkts);
1248 }
1249 
/* Dumps given nix_sq's context into the seq_file, word by word (W0..W15).
 * CN10K silicon uses a different SQ context layout, so non-OcteonTx2
 * chips are delegated to the CN10K-specific dumper.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* !is_rvu_otx2() means CN10K: reinterpret the response buffer as
	 * the CN10K context layout and dump that instead.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1319 
1320 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1321 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1322 {
1323 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1324 		   rq_ctx->ena, rq_ctx->sso_ena);
1325 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1326 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1327 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1328 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1329 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1330 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1331 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1332 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1333 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1334 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1335 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1336 
1337 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1338 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1339 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1340 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1341 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1342 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1343 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1344 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1345 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1346 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1347 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1348 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1349 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1350 
1351 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1352 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1353 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1354 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1355 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1356 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1357 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1358 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1359 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1360 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1361 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1362 
1363 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1364 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1365 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1366 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1367 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1368 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1369 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1370 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1371 
1372 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1373 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1374 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1375 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1376 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1377 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1378 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1379 
1380 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1381 		   rq_ctx->ltag, rq_ctx->good_utag);
1382 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1383 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1384 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1385 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1386 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1387 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1388 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1389 
1390 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1391 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1392 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1393 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1394 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1395 }
1396 
/* Dumps given nix_rq's context into the seq_file, word by word (W0..W10).
 * CN10K silicon uses a different RQ context layout, so non-OcteonTx2
 * chips are delegated to the CN10K-specific dumper.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* !is_rvu_otx2() means CN10K: reinterpret the response buffer as
	 * the CN10K context layout and dump that instead.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1465 
/* Dumps given nix_cq's context into the seq_file, word by word (W0..W3).
 * The CQ context layout is shared across silicon variants, so no CN10K
 * special case is needed here.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1496 
/* seq_file show handler for the NIX sq_ctx/rq_ctx/cq_ctx debugfs files.
 * Reads back the queue context(s) previously selected via the matching
 * write handler (stored in rvu->rvu_dbg.nix_*_ctx) through the NIX admin
 * queue and pretty-prints them.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the LF/queue selection stored by the write handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-ctype queue count, label and dump routine */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps queues [0, qsize); otherwise just queue 'id' */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1585 
/* Validate a queue-context selection parsed from debugfs input and, when
 * valid, remember it in rvu->rvu_dbg so the matching show handler knows
 * what to dump.
 *
 * @all: true if the user asked for every queue of the LF.
 * @id: queue index to dump when @all is false.
 * Returns 0 on success, -EINVAL on an invalid LF, uninitialized context
 * or out-of-range queue id.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* The context must have been allocated by the LF before it can
	 * be dumped; also fetch the queue count for range checking.
	 */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	/* Persist the selection for the next read of the debugfs file */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1648 
1649 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1650 					   const char __user *buffer,
1651 					   size_t count, loff_t *ppos,
1652 					   int ctype)
1653 {
1654 	struct seq_file *m = filp->private_data;
1655 	struct nix_hw *nix_hw = m->private;
1656 	struct rvu *rvu = nix_hw->rvu;
1657 	char *cmd_buf, *ctype_string;
1658 	int nixlf, id = 0, ret;
1659 	bool all = false;
1660 
1661 	if ((*ppos != 0) || !count)
1662 		return -EINVAL;
1663 
1664 	switch (ctype) {
1665 	case NIX_AQ_CTYPE_SQ:
1666 		ctype_string = "sq";
1667 		break;
1668 	case NIX_AQ_CTYPE_RQ:
1669 		ctype_string = "rq";
1670 		break;
1671 	case NIX_AQ_CTYPE_CQ:
1672 		ctype_string = "cq";
1673 		break;
1674 	default:
1675 		return -EINVAL;
1676 	}
1677 
1678 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1679 
1680 	if (!cmd_buf)
1681 		return count;
1682 
1683 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1684 				   &nixlf, &id, &all);
1685 	if (ret < 0) {
1686 		dev_info(rvu->dev,
1687 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1688 			 ctype_string, ctype_string);
1689 		goto done;
1690 	} else {
1691 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1692 					  ctype_string, m);
1693 	}
1694 done:
1695 	kfree(cmd_buf);
1696 	return ret ? ret : count;
1697 }
1698 
1699 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
1700 					const char __user *buffer,
1701 					size_t count, loff_t *ppos)
1702 {
1703 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1704 					    NIX_AQ_CTYPE_SQ);
1705 }
1706 
1707 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
1708 {
1709 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
1710 }
1711 
1712 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1713 
1714 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
1715 					const char __user *buffer,
1716 					size_t count, loff_t *ppos)
1717 {
1718 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1719 					    NIX_AQ_CTYPE_RQ);
1720 }
1721 
1722 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
1723 {
1724 	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
1725 }
1726 
1727 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1728 
1729 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
1730 					const char __user *buffer,
1731 					size_t count, loff_t *ppos)
1732 {
1733 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1734 					    NIX_AQ_CTYPE_CQ);
1735 }
1736 
1737 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1738 {
1739 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1740 }
1741 
1742 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1743 
1744 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1745 				 unsigned long *bmap, char *qtype)
1746 {
1747 	char *buf;
1748 
1749 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1750 	if (!buf)
1751 		return;
1752 
1753 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1754 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1755 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1756 		   qtype, buf);
1757 	kfree(buf);
1758 }
1759 
1760 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1761 {
1762 	if (!pfvf->cq_ctx)
1763 		seq_puts(filp, "cq context is not initialized\n");
1764 	else
1765 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1766 				     "cq");
1767 
1768 	if (!pfvf->rq_ctx)
1769 		seq_puts(filp, "rq context is not initialized\n");
1770 	else
1771 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1772 				     "rq");
1773 
1774 	if (!pfvf->sq_ctx)
1775 		seq_puts(filp, "sq context is not initialized\n");
1776 	else
1777 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1778 				     "sq");
1779 }
1780 
1781 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1782 				       const char __user *buffer,
1783 				       size_t count, loff_t *ppos)
1784 {
1785 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1786 				   BLKTYPE_NIX);
1787 }
1788 
1789 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1790 {
1791 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1792 }
1793 
1794 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1795 
1796 static void print_band_prof_ctx(struct seq_file *m,
1797 				struct nix_bandprof_s *prof)
1798 {
1799 	char *str;
1800 
1801 	switch (prof->pc_mode) {
1802 	case NIX_RX_PC_MODE_VLAN:
1803 		str = "VLAN";
1804 		break;
1805 	case NIX_RX_PC_MODE_DSCP:
1806 		str = "DSCP";
1807 		break;
1808 	case NIX_RX_PC_MODE_GEN:
1809 		str = "Generic";
1810 		break;
1811 	case NIX_RX_PC_MODE_RSVD:
1812 		str = "Reserved";
1813 		break;
1814 	}
1815 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1816 	str = (prof->icolor == 3) ? "Color blind" :
1817 		(prof->icolor == 0) ? "Green" :
1818 		(prof->icolor == 1) ? "Yellow" : "Red";
1819 	seq_printf(m, "W0: icolor\t\t%s\n", str);
1820 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1821 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1822 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1823 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1824 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1825 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1826 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1827 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1828 
1829 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1830 	str = (prof->lmode == 0) ? "byte" : "packet";
1831 	seq_printf(m, "W1: lmode\t\t%s\n", str);
1832 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1833 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1834 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1835 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1836 	str = (prof->gc_action == 0) ? "PASS" :
1837 		(prof->gc_action == 1) ? "DROP" : "RED";
1838 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
1839 	str = (prof->yc_action == 0) ? "PASS" :
1840 		(prof->yc_action == 1) ? "DROP" : "RED";
1841 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
1842 	str = (prof->rc_action == 0) ? "PASS" :
1843 		(prof->rc_action == 1) ? "DROP" : "RED";
1844 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
1845 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1846 	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1847 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1848 
1849 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1850 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1851 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1852 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1853 		   (u64)prof->green_pkt_pass);
1854 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1855 		   (u64)prof->yellow_pkt_pass);
1856 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1857 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
1858 		   (u64)prof->green_octs_pass);
1859 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1860 		   (u64)prof->yellow_octs_pass);
1861 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1862 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1863 		   (u64)prof->green_pkt_drop);
1864 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1865 		   (u64)prof->yellow_pkt_drop);
1866 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1867 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
1868 		   (u64)prof->green_octs_drop);
1869 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1870 		   (u64)prof->yellow_octs_drop);
1871 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1872 	seq_puts(m, "==============================\n");
1873 }
1874 
/* Walk every allocated bandwidth profile on each policer layer
 * (leaf/mid/top), fetch its context from hardware via the admin queue
 * and dump it together with the owning PF/VF.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Dump only profiles that are currently allocated */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index encodes the layer above bit 14 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				/* Abort the dump but still return success so
				 * the seq_file read completes cleanly.
				 */
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* Zero FUNC bits mean the owner is a PF, not a VF */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}
1929 
1930 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1931 
1932 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1933 {
1934 	struct nix_hw *nix_hw = m->private;
1935 	struct nix_ipolicer *ipolicer;
1936 	int layer;
1937 	char *str;
1938 
1939 	/* Ingress policers do not exist on all platforms */
1940 	if (!nix_hw->ipolicer)
1941 		return 0;
1942 
1943 	seq_puts(m, "\nBandwidth profile resource free count\n");
1944 	seq_puts(m, "=====================================\n");
1945 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1946 		if (layer == BAND_PROF_INVAL_LAYER)
1947 			continue;
1948 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1949 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1950 
1951 		ipolicer = &nix_hw->ipolicer[layer];
1952 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1953 			   ipolicer->band_prof.max,
1954 			   rvu_rsrc_free_count(&ipolicer->band_prof));
1955 	}
1956 	seq_puts(m, "=====================================\n");
1957 
1958 	return 0;
1959 }
1960 
1961 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1962 
1963 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1964 {
1965 	struct nix_hw *nix_hw;
1966 
1967 	if (!is_block_implemented(rvu->hw, blkaddr))
1968 		return;
1969 
1970 	if (blkaddr == BLKADDR_NIX0) {
1971 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1972 		nix_hw = &rvu->hw->nix[0];
1973 	} else {
1974 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1975 						      rvu->rvu_dbg.root);
1976 		nix_hw = &rvu->hw->nix[1];
1977 	}
1978 
1979 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1980 			    &rvu_dbg_nix_sq_ctx_fops);
1981 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1982 			    &rvu_dbg_nix_rq_ctx_fops);
1983 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1984 			    &rvu_dbg_nix_cq_ctx_fops);
1985 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1986 			    &rvu_dbg_nix_ndc_tx_cache_fops);
1987 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1988 			    &rvu_dbg_nix_ndc_rx_cache_fops);
1989 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1990 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1991 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1992 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1993 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1994 			    &rvu_dbg_nix_qsize_fops);
1995 	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1996 			    &rvu_dbg_nix_band_prof_ctx_fops);
1997 	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
1998 			    &rvu_dbg_nix_band_prof_rsrc_fops);
1999 }
2000 
2001 static void rvu_dbg_npa_init(struct rvu *rvu)
2002 {
2003 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2004 
2005 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2006 			    &rvu_dbg_npa_qsize_fops);
2007 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2008 			    &rvu_dbg_npa_aura_ctx_fops);
2009 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2010 			    &rvu_dbg_npa_pool_ctx_fops);
2011 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2012 			    &rvu_dbg_npa_ndc_cache_fops);
2013 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2014 			    &rvu_dbg_npa_ndc_hits_miss_fops);
2015 }
2016 
/* Fetch the cumulative NIX RX stat 'idx' for the current cgxd/lmac_id
 * and, on success, print it with label 'name'.  Evaluates to the
 * counter value; writes the status into 'err' from the caller's scope.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2026 
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: fetch cumulative NIX
 * TX stat 'idx', print it with label 'name' on success, evaluate to
 * the counter value and set 'err' from the caller's scope.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2036 
2037 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2038 {
2039 	struct cgx_link_user_info linfo;
2040 	struct mac_ops *mac_ops;
2041 	void *cgxd = s->private;
2042 	u64 ucast, mcast, bcast;
2043 	int stat = 0, err = 0;
2044 	u64 tx_stat, rx_stat;
2045 	struct rvu *rvu;
2046 
2047 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2048 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2049 	if (!rvu)
2050 		return -ENODEV;
2051 
2052 	mac_ops = get_mac_ops(cgxd);
2053 	/* There can be no CGX devices at all */
2054 	if (!mac_ops)
2055 		return 0;
2056 
2057 	/* Link status */
2058 	seq_puts(s, "\n=======Link Status======\n\n");
2059 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2060 	if (err)
2061 		seq_puts(s, "Failed to read link status\n");
2062 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
2063 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2064 
2065 	/* Rx stats */
2066 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2067 		   mac_ops->name);
2068 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2069 	if (err)
2070 		return err;
2071 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2072 	if (err)
2073 		return err;
2074 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2075 	if (err)
2076 		return err;
2077 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2078 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2079 	if (err)
2080 		return err;
2081 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2082 	if (err)
2083 		return err;
2084 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2085 	if (err)
2086 		return err;
2087 
2088 	/* Tx stats */
2089 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2090 		   mac_ops->name);
2091 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2092 	if (err)
2093 		return err;
2094 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2095 	if (err)
2096 		return err;
2097 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2098 	if (err)
2099 		return err;
2100 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2101 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2102 	if (err)
2103 		return err;
2104 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2105 	if (err)
2106 		return err;
2107 
2108 	/* Rx stats */
2109 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2110 	while (stat < mac_ops->rx_stats_cnt) {
2111 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2112 		if (err)
2113 			return err;
2114 		if (is_rvu_otx2(rvu))
2115 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2116 				   rx_stat);
2117 		else
2118 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2119 				   rx_stat);
2120 		stat++;
2121 	}
2122 
2123 	/* Tx stats */
2124 	stat = 0;
2125 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2126 	while (stat < mac_ops->tx_stats_cnt) {
2127 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2128 		if (err)
2129 			return err;
2130 
2131 		if (is_rvu_otx2(rvu))
2132 			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2133 				   tx_stat);
2134 		else
2135 			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2136 				   tx_stat);
2137 		stat++;
2138 	}
2139 
2140 	return err;
2141 }
2142 
2143 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2144 {
2145 	struct dentry *current_dir;
2146 	char *buf;
2147 
2148 	current_dir = filp->file->f_path.dentry->d_parent;
2149 	buf = strrchr(current_dir->d_name.name, 'c');
2150 	if (!buf)
2151 		return -EINVAL;
2152 
2153 	return kstrtoint(buf + 1, 10, lmac_id);
2154 }
2155 
/* seq_file show for the per-LMAC "stats" file. */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}
2166 
2167 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2168 
2169 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2170 {
2171 	struct pci_dev *pdev = NULL;
2172 	void *cgxd = s->private;
2173 	char *bcast, *mcast;
2174 	u16 index, domain;
2175 	u8 dmac[ETH_ALEN];
2176 	struct rvu *rvu;
2177 	u64 cfg, mac;
2178 	int pf;
2179 
2180 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2181 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2182 	if (!rvu)
2183 		return -ENODEV;
2184 
2185 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2186 	domain = 2;
2187 
2188 	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2189 	if (!pdev)
2190 		return 0;
2191 
2192 	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2193 	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2194 	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2195 
2196 	seq_puts(s,
2197 		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2198 	seq_printf(s, "%s  PF%d  %9s  %9s",
2199 		   dev_name(&pdev->dev), pf, bcast, mcast);
2200 	if (cfg & CGX_DMAC_CAM_ACCEPT)
2201 		seq_printf(s, "%12s\n\n", "UNICAST");
2202 	else
2203 		seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2204 
2205 	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2206 
2207 	for (index = 0 ; index < 32 ; index++) {
2208 		cfg = cgx_read_dmac_entry(cgxd, index);
2209 		/* Display enabled dmac entries associated with current lmac */
2210 		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2211 		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2212 			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2213 			u64_to_ether_addr(mac, dmac);
2214 			seq_printf(s, "%7d     %pM\n", index, dmac);
2215 		}
2216 	}
2217 
2218 	return 0;
2219 }
2220 
/* seq_file show for the per-LMAC "mac_filter" file. */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}
2231 
2232 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2233 
2234 static void rvu_dbg_cgx_init(struct rvu *rvu)
2235 {
2236 	struct mac_ops *mac_ops;
2237 	unsigned long lmac_bmap;
2238 	int i, lmac_id;
2239 	char dname[20];
2240 	void *cgx;
2241 
2242 	if (!cgx_get_cgxcnt_max())
2243 		return;
2244 
2245 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2246 	if (!mac_ops)
2247 		return;
2248 
2249 	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2250 						   rvu->rvu_dbg.root);
2251 
2252 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2253 		cgx = rvu_cgx_pdata(i, rvu);
2254 		if (!cgx)
2255 			continue;
2256 		lmac_bmap = cgx_get_lmac_bmap(cgx);
2257 		/* cgx debugfs dir */
2258 		sprintf(dname, "%s%d", mac_ops->name, i);
2259 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2260 						      rvu->rvu_dbg.cgx_root);
2261 
2262 		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2263 			/* lmac debugfs dir */
2264 			sprintf(dname, "lmac%d", lmac_id);
2265 			rvu->rvu_dbg.lmac =
2266 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2267 
2268 			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2269 					    cgx, &rvu_dbg_cgx_stat_fops);
2270 			debugfs_create_file("mac_filter", 0600,
2271 					    rvu->rvu_dbg.lmac, cgx,
2272 					    &rvu_dbg_cgx_dmac_flt_fops);
2273 		}
2274 	}
2275 }
2276 
2277 /* NPC debugfs APIs */
2278 static void rvu_print_npc_mcam_info(struct seq_file *s,
2279 				    u16 pcifunc, int blkaddr)
2280 {
2281 	struct rvu *rvu = s->private;
2282 	int entry_acnt, entry_ecnt;
2283 	int cntr_acnt, cntr_ecnt;
2284 
2285 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2286 					  &entry_acnt, &entry_ecnt);
2287 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2288 					    &cntr_acnt, &cntr_ecnt);
2289 	if (!entry_acnt && !cntr_acnt)
2290 		return;
2291 
2292 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2293 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2294 			   rvu_get_pf(pcifunc));
2295 	else
2296 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2297 			   rvu_get_pf(pcifunc),
2298 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2299 
2300 	if (entry_acnt) {
2301 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2302 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2303 	}
2304 	if (cntr_acnt) {
2305 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2306 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2307 	}
2308 }
2309 
2310 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2311 {
2312 	struct rvu *rvu = filp->private;
2313 	int pf, vf, numvfs, blkaddr;
2314 	struct npc_mcam *mcam;
2315 	u16 pcifunc, counters;
2316 	u64 cfg;
2317 
2318 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2319 	if (blkaddr < 0)
2320 		return -ENODEV;
2321 
2322 	mcam = &rvu->hw->mcam;
2323 	counters = rvu->hw->npc_counters;
2324 
2325 	seq_puts(filp, "\nNPC MCAM info:\n");
2326 	/* MCAM keywidth on receive and transmit sides */
2327 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2328 	cfg = (cfg >> 32) & 0x07;
2329 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2330 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2331 		   "224bits" : "448bits"));
2332 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2333 	cfg = (cfg >> 32) & 0x07;
2334 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2335 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2336 		   "224bits" : "448bits"));
2337 
2338 	mutex_lock(&mcam->lock);
2339 	/* MCAM entries */
2340 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2341 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2342 		   mcam->total_entries - mcam->bmap_entries);
2343 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2344 
2345 	/* MCAM counters */
2346 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2347 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2348 		   counters - mcam->counters.max);
2349 	seq_printf(filp, "\t\t Available \t: %d\n",
2350 		   rvu_rsrc_free_count(&mcam->counters));
2351 
2352 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
2353 		mutex_unlock(&mcam->lock);
2354 		return 0;
2355 	}
2356 
2357 	seq_puts(filp, "\n\t\t Current allocation\n");
2358 	seq_puts(filp, "\t\t====================\n");
2359 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2360 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2361 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2362 
2363 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2364 		numvfs = (cfg >> 12) & 0xFF;
2365 		for (vf = 0; vf < numvfs; vf++) {
2366 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2367 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2368 		}
2369 	}
2370 
2371 	mutex_unlock(&mcam->lock);
2372 	return 0;
2373 }
2374 
2375 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2376 
2377 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2378 					     void *unused)
2379 {
2380 	struct rvu *rvu = filp->private;
2381 	struct npc_mcam *mcam;
2382 	int blkaddr;
2383 
2384 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2385 	if (blkaddr < 0)
2386 		return -ENODEV;
2387 
2388 	mcam = &rvu->hw->mcam;
2389 
2390 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2391 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2392 		   rvu_read64(rvu, blkaddr,
2393 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2394 
2395 	return 0;
2396 }
2397 
2398 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2399 
/* Print every match field (and its mask) programmed in an MCAM rule.
 * rule->features is a bitmap of NPC_* field ids; packet/mask hold the
 * match values, in network byte order where ntohs() is applied below.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		/* TCP/UDP/SCTP share the same sport/dport storage */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* Field has no printable value; just end the line */
			seq_puts(s, "\n");
			break;
		}
	}
}
2463 
2464 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2465 					 struct rvu_npc_mcam_rule *rule)
2466 {
2467 	if (is_npc_intf_tx(rule->intf)) {
2468 		switch (rule->tx_action.op) {
2469 		case NIX_TX_ACTIONOP_DROP:
2470 			seq_puts(s, "\taction: Drop\n");
2471 			break;
2472 		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2473 			seq_puts(s, "\taction: Unicast to default channel\n");
2474 			break;
2475 		case NIX_TX_ACTIONOP_UCAST_CHAN:
2476 			seq_printf(s, "\taction: Unicast to channel %d\n",
2477 				   rule->tx_action.index);
2478 			break;
2479 		case NIX_TX_ACTIONOP_MCAST:
2480 			seq_puts(s, "\taction: Multicast\n");
2481 			break;
2482 		case NIX_TX_ACTIONOP_DROP_VIOL:
2483 			seq_puts(s, "\taction: Lockdown Violation Drop\n");
2484 			break;
2485 		default:
2486 			break;
2487 		}
2488 	} else {
2489 		switch (rule->rx_action.op) {
2490 		case NIX_RX_ACTIONOP_DROP:
2491 			seq_puts(s, "\taction: Drop\n");
2492 			break;
2493 		case NIX_RX_ACTIONOP_UCAST:
2494 			seq_printf(s, "\taction: Direct to queue %d\n",
2495 				   rule->rx_action.index);
2496 			break;
2497 		case NIX_RX_ACTIONOP_RSS:
2498 			seq_puts(s, "\taction: RSS\n");
2499 			break;
2500 		case NIX_RX_ACTIONOP_UCAST_IPSEC:
2501 			seq_puts(s, "\taction: Unicast ipsec\n");
2502 			break;
2503 		case NIX_RX_ACTIONOP_MCAST:
2504 			seq_puts(s, "\taction: Multicast\n");
2505 			break;
2506 		default:
2507 			break;
2508 		}
2509 	}
2510 }
2511 
/* Map an NPC interface id to a human-readable name. */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2529 
/* "mcam_rules" seq_file show: walk the software list of installed MCAM
 * rules (under mcam->lock) and print owner, direction, match fields,
 * action, enable state and (if attached) the hit counter of each.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		/* Owner PF/VF is packed into the pcifunc-style owner field */
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		if (is_npc_intf_rx(iter->intf)) {
			/* RX rules also carry a forwarding target + channel */
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}
2595 
2596 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2597 
2598 static void rvu_dbg_npc_init(struct rvu *rvu)
2599 {
2600 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2601 
2602 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2603 			    &rvu_dbg_npc_mcam_info_fops);
2604 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2605 			    &rvu_dbg_npc_mcam_rules_fops);
2606 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2607 			    &rvu_dbg_npc_rx_miss_act_fops);
2608 }
2609 
2610 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2611 {
2612 	struct cpt_ctx *ctx = filp->private;
2613 	u64 busy_sts = 0, free_sts = 0;
2614 	u32 e_min = 0, e_max = 0, e, i;
2615 	u16 max_ses, max_ies, max_aes;
2616 	struct rvu *rvu = ctx->rvu;
2617 	int blkaddr = ctx->blkaddr;
2618 	u64 reg;
2619 
2620 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2621 	max_ses = reg & 0xffff;
2622 	max_ies = (reg >> 16) & 0xffff;
2623 	max_aes = (reg >> 32) & 0xffff;
2624 
2625 	switch (eng_type) {
2626 	case CPT_AE_TYPE:
2627 		e_min = max_ses + max_ies;
2628 		e_max = max_ses + max_ies + max_aes;
2629 		break;
2630 	case CPT_SE_TYPE:
2631 		e_min = 0;
2632 		e_max = max_ses;
2633 		break;
2634 	case CPT_IE_TYPE:
2635 		e_min = max_ses;
2636 		e_max = max_ses + max_ies;
2637 		break;
2638 	default:
2639 		return -EINVAL;
2640 	}
2641 
2642 	for (e = e_min, i = 0; e < e_max; e++, i++) {
2643 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2644 		if (reg & 0x1)
2645 			busy_sts |= 1ULL << i;
2646 
2647 		if (reg & 0x2)
2648 			free_sts |= 1ULL << i;
2649 	}
2650 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2651 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
2652 
2653 	return 0;
2654 }
2655 
/* seq_file show for "cpt_ae_sts": asymmetric (AE) engine status. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}
2660 
2661 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2662 
/* seq_file show for "cpt_se_sts": symmetric (SE) engine status. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}
2667 
2668 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2669 
/* seq_file show for "cpt_ie_sts": IPsec (IE) engine status. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}
2674 
2675 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2676 
2677 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2678 {
2679 	struct cpt_ctx *ctx = filp->private;
2680 	u16 max_ses, max_ies, max_aes;
2681 	struct rvu *rvu = ctx->rvu;
2682 	int blkaddr = ctx->blkaddr;
2683 	u32 e_max, e;
2684 	u64 reg;
2685 
2686 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2687 	max_ses = reg & 0xffff;
2688 	max_ies = (reg >> 16) & 0xffff;
2689 	max_aes = (reg >> 32) & 0xffff;
2690 
2691 	e_max = max_ses + max_ies + max_aes;
2692 
2693 	seq_puts(filp, "===========================================\n");
2694 	for (e = 0; e < e_max; e++) {
2695 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2696 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2697 			   reg & 0xff);
2698 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2699 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2700 			   reg);
2701 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2702 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2703 			   reg);
2704 		seq_puts(filp, "===========================================\n");
2705 	}
2706 	return 0;
2707 }
2708 
2709 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2710 
2711 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2712 {
2713 	struct cpt_ctx *ctx = filp->private;
2714 	int blkaddr = ctx->blkaddr;
2715 	struct rvu *rvu = ctx->rvu;
2716 	struct rvu_block *block;
2717 	struct rvu_hwinfo *hw;
2718 	u64 reg;
2719 	u32 lf;
2720 
2721 	hw = rvu->hw;
2722 	block = &hw->block[blkaddr];
2723 	if (!block->lf.bmap)
2724 		return -ENODEV;
2725 
2726 	seq_puts(filp, "===========================================\n");
2727 	for (lf = 0; lf < block->lf.max; lf++) {
2728 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2729 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
2730 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2731 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
2732 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2733 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
2734 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2735 				(lf << block->lfshift));
2736 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
2737 		seq_puts(filp, "===========================================\n");
2738 	}
2739 	return 0;
2740 }
2741 
2742 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2743 
2744 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2745 {
2746 	struct cpt_ctx *ctx = filp->private;
2747 	struct rvu *rvu = ctx->rvu;
2748 	int blkaddr = ctx->blkaddr;
2749 	u64 reg0, reg1;
2750 
2751 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2752 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2753 	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
2754 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2755 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2756 	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
2757 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2758 	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
2759 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2760 	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
2761 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2762 	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
2763 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2764 	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
2765 
2766 	return 0;
2767 }
2768 
2769 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2770 
2771 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2772 {
2773 	struct cpt_ctx *ctx = filp->private;
2774 	struct rvu *rvu = ctx->rvu;
2775 	int blkaddr = ctx->blkaddr;
2776 	u64 reg;
2777 
2778 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2779 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2780 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2781 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2782 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2783 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2784 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2785 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2786 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2787 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2788 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2789 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2790 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2791 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2792 
2793 	return 0;
2794 }
2795 
2796 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2797 
2798 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2799 {
2800 	struct cpt_ctx *ctx;
2801 
2802 	if (!is_block_implemented(rvu->hw, blkaddr))
2803 		return;
2804 
2805 	if (blkaddr == BLKADDR_CPT0) {
2806 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2807 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
2808 		ctx->blkaddr = BLKADDR_CPT0;
2809 		ctx->rvu = rvu;
2810 	} else {
2811 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2812 						      rvu->rvu_dbg.root);
2813 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
2814 		ctx->blkaddr = BLKADDR_CPT1;
2815 		ctx->rvu = rvu;
2816 	}
2817 
2818 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2819 			    &rvu_dbg_cpt_pc_fops);
2820 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2821 			    &rvu_dbg_cpt_ae_sts_fops);
2822 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2823 			    &rvu_dbg_cpt_se_sts_fops);
2824 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2825 			    &rvu_dbg_cpt_ie_sts_fops);
2826 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2827 			    &rvu_dbg_cpt_engines_info_fops);
2828 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2829 			    &rvu_dbg_cpt_lfs_info_fops);
2830 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2831 			    &rvu_dbg_cpt_err_info_fops);
2832 }
2833 
/* Top-level debugfs directory name: "octeontx2" on OTx2, "cn10k" otherwise. */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
2841 
2842 void rvu_dbg_init(struct rvu *rvu)
2843 {
2844 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2845 
2846 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2847 			    &rvu_dbg_rsrc_status_fops);
2848 
2849 	if (!is_rvu_otx2(rvu))
2850 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
2851 				    rvu, &rvu_dbg_lmtst_map_table_fops);
2852 
2853 	if (!cgx_get_cgxcnt_max())
2854 		goto create;
2855 
2856 	if (is_rvu_otx2(rvu))
2857 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2858 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2859 	else
2860 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2861 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2862 
2863 create:
2864 	rvu_dbg_npa_init(rvu);
2865 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2866 
2867 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2868 	rvu_dbg_cgx_init(rvu);
2869 	rvu_dbg_npc_init(rvu);
2870 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2871 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2872 }
2873 
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2878 
2879 #endif /* CONFIG_DEBUG_FS */
2880