1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 
22 #define DEBUGFS_DIR_NAME "octeontx2"
23 
/* Indices of the CGX statistics counters; also used to index the
 * cgx_rx_stats_fields[] and cgx_tx_stats_fields[] name tables below.
 */
enum {
	CGX_STAT0 = 0,
	CGX_STAT1 = 1,
	CGX_STAT2 = 2,
	CGX_STAT3 = 3,
	CGX_STAT4 = 4,
	CGX_STAT5 = 5,
	CGX_STAT6 = 6,
	CGX_STAT7 = 7,
	CGX_STAT8 = 8,
	CGX_STAT9 = 9,
	CGX_STAT10 = 10,
	CGX_STAT11 = 11,
	CGX_STAT12 = 12,
	CGX_STAT13 = 13,
	CGX_STAT14 = 14,
	CGX_STAT15 = 15,
	CGX_STAT16 = 16,
	CGX_STAT17 = 17,
	CGX_STAT18 = 18,
};
45 
/* NIX per-LF transmit statistics register indices */
enum nix_stat_lf_tx {
	TX_UCAST		= 0,
	TX_BCAST		= 1,
	TX_MCAST		= 2,
	TX_DROP			= 3,
	TX_OCTS			= 4,
	TX_STATS_ENUM_LAST	= 5,
};
55 
/* NIX per-LF receive statistics register indices */
enum nix_stat_lf_rx {
	RX_OCTS			= 0,
	RX_UCAST		= 1,
	RX_BCAST		= 2,
	RX_MCAST		= 3,
	RX_DROP			= 4,
	RX_DROP_OCTS		= 5,
	RX_FCS			= 6,
	RX_ERR			= 7,
	RX_DRP_BCAST		= 8,
	RX_DRP_MCAST		= 9,
	RX_DRP_L3BCAST		= 10,
	RX_DRP_L3MCAST		= 11,
	RX_STATS_ENUM_LAST	= 12,
};
72 
/* Printable names for the CGX RX statistics counters, indexed by the
 * CGX_STATx values above.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
88 
/* Printable names for the CGX TX statistics counters, indexed by the
 * CGX_STATx values above.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
109 
/* Printable names for the RPM (CN10K MAC) RX statistics counters, in
 * hardware counter order. Two garbled strings fixed: "with out error"
 * -> "without error" and "a1nrange length" -> "in-range length".
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
155 
/* Printable names for the RPM (CN10K MAC) TX statistics counters, in
 * hardware counter order.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
192 
/* CPT crypto engine type identifiers (AE/SE/IE) */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
198 
/* Number of banks in an NDC block: low byte of its NDC_AF_CONST register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Placeholders so the FOPS macros below accept "NULL" for a missing
 * read or write handler (they token-paste "rvu_dbg_" onto the name).
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Define seq_file-backed file_operations for debugfs entry <name>:
 * reads go through single_open()/rvu_dbg_<read_op>(), writes (if any)
 * through rvu_dbg_<write_op>().
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Define raw file_operations for debugfs entry <name> whose handlers
 * implement read/write directly (no seq_file).
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

/* Defined later in this file; used by rvu_dbg_qsize_display() */
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
228 
229 #define LMT_MAPTBL_ENTRY_SIZE 16
230 /* Dump LMTST map table */
231 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
232 					       char __user *buffer,
233 					       size_t count, loff_t *ppos)
234 {
235 	struct rvu *rvu = filp->private_data;
236 	u64 lmt_addr, val, tbl_base;
237 	int pf, vf, num_vfs, hw_vfs;
238 	void __iomem *lmt_map_base;
239 	int buf_size = 10240;
240 	size_t off = 0;
241 	int index = 0;
242 	char *buf;
243 	int ret;
244 
245 	/* don't allow partial reads */
246 	if (*ppos != 0)
247 		return 0;
248 
249 	buf = kzalloc(buf_size, GFP_KERNEL);
250 	if (!buf)
251 		return -ENOSPC;
252 
253 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
254 
255 	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
256 	if (!lmt_map_base) {
257 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
258 		kfree(buf);
259 		return false;
260 	}
261 
262 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
263 			  "\n\t\t\t\t\tLmtst Map Table Entries");
264 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
265 			  "\n\t\t\t\t\t=======================");
266 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
267 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
268 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
269 			  "Lmtline Base (word 0)\t\t");
270 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
271 			  "Lmt Map Entry (word 1)");
272 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
273 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
274 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
275 				    pf);
276 
277 		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
278 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
279 				 (tbl_base + index));
280 		lmt_addr = readq(lmt_map_base + index);
281 		off += scnprintf(&buf[off], buf_size - 1 - off,
282 				 " 0x%016llx\t\t", lmt_addr);
283 		index += 8;
284 		val = readq(lmt_map_base + index);
285 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
286 				 val);
287 		/* Reading num of VFs per PF */
288 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
289 		for (vf = 0; vf < num_vfs; vf++) {
290 			index = (pf * rvu->hw->total_vfs * 16) +
291 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
292 			off += scnprintf(&buf[off], buf_size - 1 - off,
293 					    "PF%d:VF%d  \t\t", pf, vf);
294 			off += scnprintf(&buf[off], buf_size - 1 - off,
295 					 " 0x%llx\t\t", (tbl_base + index));
296 			lmt_addr = readq(lmt_map_base + index);
297 			off += scnprintf(&buf[off], buf_size - 1 - off,
298 					 " 0x%016llx\t\t", lmt_addr);
299 			index += 8;
300 			val = readq(lmt_map_base + index);
301 			off += scnprintf(&buf[off], buf_size - 1 - off,
302 					 " 0x%016llx\n", val);
303 		}
304 	}
305 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
306 
307 	ret = min(off, count);
308 	if (copy_to_user(buffer, buf, ret))
309 		ret = -EFAULT;
310 	kfree(buf);
311 
312 	iounmap(lmt_map_base);
313 	if (ret < 0)
314 		return ret;
315 
316 	*ppos = ret;
317 	return ret;
318 }
319 
320 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
321 
/* Render the list of LFs in @block owned by @pcifunc into @lfs as a
 * comma-separated string, compressing consecutive LFs into ranges
 * (e.g. "0-3,7"). @lfs must be large enough for the worst case
 * (see get_max_column_width()).
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts out of range so the first match can never look
	 * consecutive with a previous one.
	 */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		/* Skip LFs owned by other PF/VF functions */
		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* Consecutive with the previous LF: extend the
			 * open run instead of printing yet.
			 */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* Close the pending range, then start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* Terminate a range still open when the bitmap ended */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
355 
356 static int get_max_column_width(struct rvu *rvu)
357 {
358 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
359 	struct rvu_block block;
360 	u16 pcifunc;
361 	char *buf;
362 
363 	buf = kzalloc(buf_size, GFP_KERNEL);
364 	if (!buf)
365 		return -ENOMEM;
366 
367 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
368 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
369 			pcifunc = pf << 10 | vf;
370 			if (!pcifunc)
371 				continue;
372 
373 			for (index = 0; index < BLK_COUNT; index++) {
374 				block = rvu->hw->block[index];
375 				if (!strlen(block.name))
376 					continue;
377 
378 				get_lf_str_list(block, pcifunc, buf);
379 				if (lf_str_size <= strlen(buf))
380 					lf_str_size = strlen(buf) + 1;
381 			}
382 		}
383 	}
384 
385 	kfree(buf);
386 	return lf_str_size;
387 }
388 
389 /* Dumps current provisioning status of all RVU block LFs */
390 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
391 					  char __user *buffer,
392 					  size_t count, loff_t *ppos)
393 {
394 	int index, off = 0, flag = 0, len = 0, i = 0;
395 	struct rvu *rvu = filp->private_data;
396 	int bytes_not_copied = 0;
397 	struct rvu_block block;
398 	int pf, vf, pcifunc;
399 	int buf_size = 2048;
400 	int lf_str_size;
401 	char *lfs;
402 	char *buf;
403 
404 	/* don't allow partial reads */
405 	if (*ppos != 0)
406 		return 0;
407 
408 	buf = kzalloc(buf_size, GFP_KERNEL);
409 	if (!buf)
410 		return -ENOSPC;
411 
412 	/* Get the maximum width of a column */
413 	lf_str_size = get_max_column_width(rvu);
414 
415 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
416 	if (!lfs) {
417 		kfree(buf);
418 		return -ENOMEM;
419 	}
420 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
421 			  "pcifunc");
422 	for (index = 0; index < BLK_COUNT; index++)
423 		if (strlen(rvu->hw->block[index].name)) {
424 			off += scnprintf(&buf[off], buf_size - 1 - off,
425 					 "%-*s", lf_str_size,
426 					 rvu->hw->block[index].name);
427 		}
428 
429 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
430 	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
431 	if (bytes_not_copied)
432 		goto out;
433 
434 	i++;
435 	*ppos += off;
436 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
437 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
438 			off = 0;
439 			flag = 0;
440 			pcifunc = pf << 10 | vf;
441 			if (!pcifunc)
442 				continue;
443 
444 			if (vf) {
445 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
446 				off = scnprintf(&buf[off],
447 						buf_size - 1 - off,
448 						"%-*s", lf_str_size, lfs);
449 			} else {
450 				sprintf(lfs, "PF%d", pf);
451 				off = scnprintf(&buf[off],
452 						buf_size - 1 - off,
453 						"%-*s", lf_str_size, lfs);
454 			}
455 
456 			for (index = 0; index < BLK_COUNT; index++) {
457 				block = rvu->hw->block[index];
458 				if (!strlen(block.name))
459 					continue;
460 				len = 0;
461 				lfs[len] = '\0';
462 				get_lf_str_list(block, pcifunc, lfs);
463 				if (strlen(lfs))
464 					flag = 1;
465 
466 				off += scnprintf(&buf[off], buf_size - 1 - off,
467 						 "%-*s", lf_str_size, lfs);
468 			}
469 			if (flag) {
470 				off +=	scnprintf(&buf[off],
471 						  buf_size - 1 - off, "\n");
472 				bytes_not_copied = copy_to_user(buffer +
473 								(i * off),
474 								buf, off);
475 				if (bytes_not_copied)
476 					goto out;
477 
478 				i++;
479 				*ppos += off;
480 			}
481 		}
482 	}
483 
484 out:
485 	kfree(lfs);
486 	kfree(buf);
487 	if (bytes_not_copied)
488 		return -EFAULT;
489 
490 	return *ppos;
491 }
492 
493 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
494 
495 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
496 {
497 	struct rvu *rvu = filp->private;
498 	struct pci_dev *pdev = NULL;
499 	struct mac_ops *mac_ops;
500 	char cgx[10], lmac[10];
501 	struct rvu_pfvf *pfvf;
502 	int pf, domain, blkid;
503 	u8 cgx_id, lmac_id;
504 	u16 pcifunc;
505 
506 	domain = 2;
507 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
508 	/* There can be no CGX devices at all */
509 	if (!mac_ops)
510 		return 0;
511 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
512 		   mac_ops->name);
513 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
514 		if (!is_pf_cgxmapped(rvu, pf))
515 			continue;
516 
517 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
518 		if (!pdev)
519 			continue;
520 
521 		cgx[0] = 0;
522 		lmac[0] = 0;
523 		pcifunc = pf << 10;
524 		pfvf = rvu_get_pfvf(rvu, pcifunc);
525 
526 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
527 			blkid = 0;
528 		else
529 			blkid = 1;
530 
531 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
532 				    &lmac_id);
533 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
534 		sprintf(lmac, "LMAC%d", lmac_id);
535 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
536 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
537 	}
538 	return 0;
539 }
540 
541 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
542 
543 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
544 				u16 *pcifunc)
545 {
546 	struct rvu_block *block;
547 	struct rvu_hwinfo *hw;
548 
549 	hw = rvu->hw;
550 	block = &hw->block[blkaddr];
551 
552 	if (lf < 0 || lf >= block->lf.max) {
553 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
554 			 block->lf.max - 1);
555 		return false;
556 	}
557 
558 	*pcifunc = block->fn_map[lf];
559 	if (!*pcifunc) {
560 		dev_warn(rvu->dev,
561 			 "This LF is not attached to any RVU PFFUNC\n");
562 		return false;
563 	}
564 	return true;
565 }
566 
567 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
568 {
569 	char *buf;
570 
571 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
572 	if (!buf)
573 		return;
574 
575 	if (!pfvf->aura_ctx) {
576 		seq_puts(m, "Aura context is not initialized\n");
577 	} else {
578 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
579 					pfvf->aura_ctx->qsize);
580 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
581 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
582 	}
583 
584 	if (!pfvf->pool_ctx) {
585 		seq_puts(m, "Pool context is not initialized\n");
586 	} else {
587 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
588 					pfvf->pool_ctx->qsize);
589 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
590 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
591 	}
592 	kfree(buf);
593 }
594 
595 /* The 'qsize' entry dumps current Aura/Pool context Qsize
596  * and each context's current enable/disable status in a bitmap.
597  */
598 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
599 				 int blktype)
600 {
601 	void (*print_qsize)(struct seq_file *filp,
602 			    struct rvu_pfvf *pfvf) = NULL;
603 	struct dentry *current_dir;
604 	struct rvu_pfvf *pfvf;
605 	struct rvu *rvu;
606 	int qsize_id;
607 	u16 pcifunc;
608 	int blkaddr;
609 
610 	rvu = filp->private;
611 	switch (blktype) {
612 	case BLKTYPE_NPA:
613 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
614 		print_qsize = print_npa_qsize;
615 		break;
616 
617 	case BLKTYPE_NIX:
618 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
619 		print_qsize = print_nix_qsize;
620 		break;
621 
622 	default:
623 		return -EINVAL;
624 	}
625 
626 	if (blktype == BLKTYPE_NPA) {
627 		blkaddr = BLKADDR_NPA;
628 	} else {
629 		current_dir = filp->file->f_path.dentry->d_parent;
630 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
631 				   BLKADDR_NIX1 : BLKADDR_NIX0);
632 	}
633 
634 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
635 		return -EINVAL;
636 
637 	pfvf = rvu_get_pfvf(rvu, pcifunc);
638 	print_qsize(filp, pfvf);
639 
640 	return 0;
641 }
642 
643 static ssize_t rvu_dbg_qsize_write(struct file *filp,
644 				   const char __user *buffer, size_t count,
645 				   loff_t *ppos, int blktype)
646 {
647 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
648 	struct seq_file *seqfile = filp->private_data;
649 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
650 	struct rvu *rvu = seqfile->private;
651 	struct dentry *current_dir;
652 	int blkaddr;
653 	u16 pcifunc;
654 	int ret, lf;
655 
656 	cmd_buf = memdup_user(buffer, count + 1);
657 	if (IS_ERR(cmd_buf))
658 		return -ENOMEM;
659 
660 	cmd_buf[count] = '\0';
661 
662 	cmd_buf_tmp = strchr(cmd_buf, '\n');
663 	if (cmd_buf_tmp) {
664 		*cmd_buf_tmp = '\0';
665 		count = cmd_buf_tmp - cmd_buf + 1;
666 	}
667 
668 	cmd_buf_tmp = cmd_buf;
669 	subtoken = strsep(&cmd_buf, " ");
670 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
671 	if (cmd_buf)
672 		ret = -EINVAL;
673 
674 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
675 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
676 		goto qsize_write_done;
677 	}
678 
679 	if (blktype == BLKTYPE_NPA) {
680 		blkaddr = BLKADDR_NPA;
681 	} else {
682 		current_dir = filp->f_path.dentry->d_parent;
683 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
684 				   BLKADDR_NIX1 : BLKADDR_NIX0);
685 	}
686 
687 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
688 		ret = -EINVAL;
689 		goto qsize_write_done;
690 	}
691 	if (blktype  == BLKTYPE_NPA)
692 		rvu->rvu_dbg.npa_qsize_id = lf;
693 	else
694 		rvu->rvu_dbg.nix_qsize_id = lf;
695 
696 qsize_write_done:
697 	kfree(cmd_buf_tmp);
698 	return ret ? ret : count;
699 }
700 
/* debugfs write: select the NPA LF whose qsize the 'qsize' entry shows */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
					    BLKTYPE_NPA);
}

/* debugfs read: show qsize info for the currently selected NPA LF */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
715 
/* Dumps given NPA Aura's context: a field-by-field dump of context
 * words W0-W6 as returned by the NPA admin queue.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is skipped on OcteonTx2 silicon (see is_rvu_otx2()) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is skipped on OcteonTx2 silicon (see is_rvu_otx2()) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
763 
/* Dumps given NPA Pool's context: a field-by-field dump of context
 * words W0-W8 as returned by the NPA admin queue.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is skipped on OcteonTx2 silicon (see is_rvu_otx2()) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is skipped on OcteonTx2 silicon (see is_rvu_otx2()) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
810 
/* Reads aura/pool's ctx from admin queue and dumps either the single
 * selected context or, if "all" was requested, every context of the
 * selected NPA LF.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Pick up the LF/id/all selection previously stored by the
	 * matching write handler (see write_npa_ctx()).
	 */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build a read instruction for the NPA admin queue */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps ids 0..max_id-1; otherwise just the selected id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
891 
892 static int write_npa_ctx(struct rvu *rvu, bool all,
893 			 int npalf, int id, int ctype)
894 {
895 	struct rvu_pfvf *pfvf;
896 	int max_id = 0;
897 	u16 pcifunc;
898 
899 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
900 		return -EINVAL;
901 
902 	pfvf = rvu_get_pfvf(rvu, pcifunc);
903 
904 	if (ctype == NPA_AQ_CTYPE_AURA) {
905 		if (!pfvf->aura_ctx) {
906 			dev_warn(rvu->dev, "Aura context is not initialized\n");
907 			return -EINVAL;
908 		}
909 		max_id = pfvf->aura_ctx->qsize;
910 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
911 		if (!pfvf->pool_ctx) {
912 			dev_warn(rvu->dev, "Pool context is not initialized\n");
913 			return -EINVAL;
914 		}
915 		max_id = pfvf->pool_ctx->qsize;
916 	}
917 
918 	if (id < 0 || id >= max_id) {
919 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
920 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
921 			max_id - 1);
922 		return -EINVAL;
923 	}
924 
925 	switch (ctype) {
926 	case NPA_AQ_CTYPE_AURA:
927 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
928 		rvu->rvu_dbg.npa_aura_ctx.id = id;
929 		rvu->rvu_dbg.npa_aura_ctx.all = all;
930 		break;
931 
932 	case NPA_AQ_CTYPE_POOL:
933 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
934 		rvu->rvu_dbg.npa_pool_ctx.id = id;
935 		rvu->rvu_dbg.npa_pool_ctx.all = all;
936 		break;
937 	default:
938 		return -EINVAL;
939 	}
940 	return 0;
941 }
942 
/* Parse a "<npalf> <id|all>" command from userspace into @npalf, @id
 * and @all. @cmd_buf must have room for *count + 1 bytes; a trailing
 * newline is trimmed and extra tokens are rejected. Returns 0 on
 * success, -EFAULT/-EINVAL otherwise.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Second token: a context id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* Anything left over is a usage error */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
980 
981 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
982 				     const char __user *buffer,
983 				     size_t count, loff_t *ppos, int ctype)
984 {
985 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
986 					"aura" : "pool";
987 	struct seq_file *seqfp = filp->private_data;
988 	struct rvu *rvu = seqfp->private;
989 	int npalf, id = 0, ret;
990 	bool all = false;
991 
992 	if ((*ppos != 0) || !count)
993 		return -EINVAL;
994 
995 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
996 	if (!cmd_buf)
997 		return count;
998 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
999 				   &npalf, &id, &all);
1000 	if (ret < 0) {
1001 		dev_info(rvu->dev,
1002 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1003 			 ctype_string, ctype_string);
1004 		goto done;
1005 	} else {
1006 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1007 	}
1008 done:
1009 	kfree(cmd_buf);
1010 	return ret ? ret : count;
1011 }
1012 
/* debugfs write: select which aura context(s) aura_ctx displays */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}

/* debugfs read: dump the selected aura context(s) */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1027 
/* debugfs ->write for npa_pool_ctx: select which pool context(s) to dump */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}

/* debugfs seq_file show for npa_pool_ctx */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

/* Generates rvu_dbg_npa_pool_ctx_fops */
RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1042 
1043 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1044 			    int ctype, int transaction)
1045 {
1046 	u64 req, out_req, lat, cant_alloc;
1047 	struct nix_hw *nix_hw;
1048 	struct rvu *rvu;
1049 	int port;
1050 
1051 	if (blk_addr == BLKADDR_NDC_NPA0) {
1052 		rvu = s->private;
1053 	} else {
1054 		nix_hw = s->private;
1055 		rvu = nix_hw->rvu;
1056 	}
1057 
1058 	for (port = 0; port < NDC_MAX_PORT; port++) {
1059 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1060 						(port, ctype, transaction));
1061 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1062 						(port, ctype, transaction));
1063 		out_req = rvu_read64(rvu, blk_addr,
1064 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1065 				     (port, ctype, transaction));
1066 		cant_alloc = rvu_read64(rvu, blk_addr,
1067 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1068 					(port, transaction));
1069 		seq_printf(s, "\nPort:%d\n", port);
1070 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1071 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1072 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1073 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1074 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1075 	}
1076 }
1077 
1078 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1079 {
1080 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1081 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1082 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1083 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1084 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1085 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1086 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1087 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1088 	return 0;
1089 }
1090 
/* debugfs seq_file show for the NPA NDC cache statistics */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

/* Generates rvu_dbg_npa_ndc_cache_fops (read-only; no write handler) */
RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1097 
1098 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1099 {
1100 	struct nix_hw *nix_hw;
1101 	struct rvu *rvu;
1102 	int bank, max_bank;
1103 
1104 	if (blk_addr == BLKADDR_NDC_NPA0) {
1105 		rvu = s->private;
1106 	} else {
1107 		nix_hw = s->private;
1108 		rvu = nix_hw->rvu;
1109 	}
1110 
1111 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
1112 	for (bank = 0; bank < max_bank; bank++) {
1113 		seq_printf(s, "BANK:%d\n", bank);
1114 		seq_printf(s, "\tHits:\t%lld\n",
1115 			   (u64)rvu_read64(rvu, blk_addr,
1116 			   NDC_AF_BANKX_HIT_PC(bank)));
1117 		seq_printf(s, "\tMiss:\t%lld\n",
1118 			   (u64)rvu_read64(rvu, blk_addr,
1119 			    NDC_AF_BANKX_MISS_PC(bank)));
1120 	}
1121 	return 0;
1122 }
1123 
/* debugfs seq_file show for NIX RX NDC cache stats; picks the NDC block
 * matching this NIX instance (NIX0 or NIX1).
 */
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int blkaddr = 0;
	int ndc_idx = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}

/* Generates rvu_dbg_nix_ndc_rx_cache_fops (read-only) */
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1138 
/* debugfs seq_file show for NIX TX NDC cache stats; picks the NDC block
 * matching this NIX instance (NIX0 or NIX1).
 */
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int blkaddr = 0;
	int ndc_idx = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}

/* Generates rvu_dbg_nix_ndc_tx_cache_fops (read-only) */
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1153 
/* debugfs seq_file show for NPA NDC per-bank hit/miss counters */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

/* Generates rvu_dbg_npa_ndc_hits_miss_fops (read-only) */
RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1161 
/* debugfs seq_file show for NIX RX NDC per-bank hit/miss counters.
 * NOTE(review): ndc_idx stays NPA0_U here rather than NIX0_RX/NIX1_RX;
 * harmless today because ndc_blk_hits_miss_stats() ignores its idx
 * argument — confirm if idx ever becomes meaningful.
 */
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

/* Generates rvu_dbg_nix_ndc_rx_hits_miss_fops (read-only) */
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1176 
/* debugfs seq_file show for NIX TX NDC per-bank hit/miss counters.
 * NOTE(review): ndc_idx stays NPA0_U (see the RX variant above); unused
 * by the callee, so harmless — confirm if idx ever becomes meaningful.
 */
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

/* Generates rvu_dbg_nix_ndc_tx_hits_miss_fops (read-only) */
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1191 
1192 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1193 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1194 {
1195 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1196 		   sq_ctx->ena, sq_ctx->qint_idx);
1197 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1198 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1199 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1200 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1201 
1202 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1203 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1204 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1205 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1206 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1207 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1208 
1209 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1210 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1211 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1212 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1213 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1214 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1215 
1216 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1217 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1218 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1219 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1220 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1221 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1222 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1223 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1224 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1225 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1226 
1227 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1228 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1229 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1230 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1231 		   sq_ctx->smenq_next_sqb);
1232 
1233 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1234 
1235 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1236 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1237 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1238 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1239 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1240 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1241 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1242 
1243 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1244 		   (u64)sq_ctx->scm_lso_rem);
1245 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1246 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1247 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1248 		   (u64)sq_ctx->dropped_octs);
1249 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1250 		   (u64)sq_ctx->dropped_pkts);
1251 }
1252 
/* Dumps the given NIX SQ context from an AQ read response. On CN10K
 * silicon the context layout differs, so the response is reinterpreted
 * and handed to the CN10K-specific printer instead.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OcteonTX2 (i.e. CN10K) uses a different SQ context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4-W8 are SQB pointers (addresses), printed in hex */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10-W15 are 48-bit counters, widened to u64 for printing */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1322 
1323 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1324 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1325 {
1326 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1327 		   rq_ctx->ena, rq_ctx->sso_ena);
1328 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1329 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1330 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1331 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1332 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1333 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1334 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1335 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1336 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1337 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1338 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1339 
1340 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1341 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1342 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1343 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1344 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1345 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1346 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1347 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1348 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1349 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1350 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1351 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1352 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1353 
1354 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1355 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1356 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1357 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1358 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1359 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1360 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1361 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1362 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1363 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1364 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1365 
1366 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1367 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1368 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1369 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1370 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1371 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1372 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1373 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1374 
1375 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1376 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1377 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1378 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1379 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1380 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1381 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1382 
1383 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1384 		   rq_ctx->ltag, rq_ctx->good_utag);
1385 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1386 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1387 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1388 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1389 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1390 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1391 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1392 
1393 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1394 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1395 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1396 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1397 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1398 }
1399 
/* Dumps the given NIX RQ context from an AQ read response. On CN10K
 * silicon the context layout differs, so the response is reinterpreted
 * and handed to the CN10K-specific printer instead.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OcteonTX2 (i.e. CN10K) uses a different RQ context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10 are 48-bit counters, widened to u64 for printing */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1468 
1469 /* Dumps given nix_cq's context */
1470 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1471 {
1472 	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1473 
1474 	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1475 
1476 	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1477 	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1478 		   cq_ctx->avg_con, cq_ctx->cint_idx);
1479 	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1480 		   cq_ctx->cq_err, cq_ctx->qint_idx);
1481 	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1482 		   cq_ctx->bpid, cq_ctx->bp_ena);
1483 
1484 	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1485 		   cq_ctx->update_time, cq_ctx->avg_level);
1486 	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1487 		   cq_ctx->head, cq_ctx->tail);
1488 
1489 	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1490 		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1491 	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1492 		   cq_ctx->qsize, cq_ctx->caching);
1493 	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1494 		   cq_ctx->substream, cq_ctx->ena);
1495 	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1496 		   cq_ctx->drop_ena, cq_ctx->drop);
1497 	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1498 }
1499 
/* Show handler shared by the nix_{sq,rq,cq}_ctx debugfs files: fetches
 * the (lf, id, all) selection previously stored by the matching _write
 * handler, then reads and dumps the queue context(s) via the NIX AQ
 * mailbox. Returns 0 on success or -EINVAL on any failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Load the selection stored by write_nix_queue_ctx() */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	/* Map the LF index to its owning PF/VF (pcifunc) */
	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the queue-size limit and matching context printer */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps every queue of the LF; otherwise just the one id */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1588 
1589 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1590 			       int id, int ctype, char *ctype_string,
1591 			       struct seq_file *m)
1592 {
1593 	struct nix_hw *nix_hw = m->private;
1594 	struct rvu_pfvf *pfvf;
1595 	int max_id = 0;
1596 	u16 pcifunc;
1597 
1598 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1599 		return -EINVAL;
1600 
1601 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1602 
1603 	if (ctype == NIX_AQ_CTYPE_SQ) {
1604 		if (!pfvf->sq_ctx) {
1605 			dev_warn(rvu->dev, "SQ context is not initialized\n");
1606 			return -EINVAL;
1607 		}
1608 		max_id = pfvf->sq_ctx->qsize;
1609 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1610 		if (!pfvf->rq_ctx) {
1611 			dev_warn(rvu->dev, "RQ context is not initialized\n");
1612 			return -EINVAL;
1613 		}
1614 		max_id = pfvf->rq_ctx->qsize;
1615 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1616 		if (!pfvf->cq_ctx) {
1617 			dev_warn(rvu->dev, "CQ context is not initialized\n");
1618 			return -EINVAL;
1619 		}
1620 		max_id = pfvf->cq_ctx->qsize;
1621 	}
1622 
1623 	if (id < 0 || id >= max_id) {
1624 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1625 			 ctype_string, max_id - 1);
1626 		return -EINVAL;
1627 	}
1628 	switch (ctype) {
1629 	case NIX_AQ_CTYPE_CQ:
1630 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1631 		rvu->rvu_dbg.nix_cq_ctx.id = id;
1632 		rvu->rvu_dbg.nix_cq_ctx.all = all;
1633 		break;
1634 
1635 	case NIX_AQ_CTYPE_SQ:
1636 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1637 		rvu->rvu_dbg.nix_sq_ctx.id = id;
1638 		rvu->rvu_dbg.nix_sq_ctx.all = all;
1639 		break;
1640 
1641 	case NIX_AQ_CTYPE_RQ:
1642 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1643 		rvu->rvu_dbg.nix_rq_ctx.id = id;
1644 		rvu->rvu_dbg.nix_rq_ctx.all = all;
1645 		break;
1646 	default:
1647 		return -EINVAL;
1648 	}
1649 	return 0;
1650 }
1651 
1652 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1653 					   const char __user *buffer,
1654 					   size_t count, loff_t *ppos,
1655 					   int ctype)
1656 {
1657 	struct seq_file *m = filp->private_data;
1658 	struct nix_hw *nix_hw = m->private;
1659 	struct rvu *rvu = nix_hw->rvu;
1660 	char *cmd_buf, *ctype_string;
1661 	int nixlf, id = 0, ret;
1662 	bool all = false;
1663 
1664 	if ((*ppos != 0) || !count)
1665 		return -EINVAL;
1666 
1667 	switch (ctype) {
1668 	case NIX_AQ_CTYPE_SQ:
1669 		ctype_string = "sq";
1670 		break;
1671 	case NIX_AQ_CTYPE_RQ:
1672 		ctype_string = "rq";
1673 		break;
1674 	case NIX_AQ_CTYPE_CQ:
1675 		ctype_string = "cq";
1676 		break;
1677 	default:
1678 		return -EINVAL;
1679 	}
1680 
1681 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1682 
1683 	if (!cmd_buf)
1684 		return count;
1685 
1686 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1687 				   &nixlf, &id, &all);
1688 	if (ret < 0) {
1689 		dev_info(rvu->dev,
1690 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1691 			 ctype_string, ctype_string);
1692 		goto done;
1693 	} else {
1694 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1695 					  ctype_string, m);
1696 	}
1697 done:
1698 	kfree(cmd_buf);
1699 	return ret ? ret : count;
1700 }
1701 
/* debugfs ->write for nix_sq_ctx: select which SQ context(s) to dump */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}

/* debugfs seq_file show for nix_sq_ctx */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

/* Generates rvu_dbg_nix_sq_ctx_fops */
RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1716 
/* debugfs ->write for nix_rq_ctx: select which RQ context(s) to dump */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}

/* debugfs seq_file show for nix_rq_ctx */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}

/* Generates rvu_dbg_nix_rq_ctx_fops */
RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1731 
/* debugfs ->write for nix_cq_ctx: select which CQ context(s) to dump */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}

/* debugfs seq_file show for nix_cq_ctx */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

/* Generates rvu_dbg_nix_cq_ctx_fops */
RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1746 
1747 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1748 				 unsigned long *bmap, char *qtype)
1749 {
1750 	char *buf;
1751 
1752 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1753 	if (!buf)
1754 		return;
1755 
1756 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1757 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1758 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1759 		   qtype, buf);
1760 	kfree(buf);
1761 }
1762 
1763 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1764 {
1765 	if (!pfvf->cq_ctx)
1766 		seq_puts(filp, "cq context is not initialized\n");
1767 	else
1768 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1769 				     "cq");
1770 
1771 	if (!pfvf->rq_ctx)
1772 		seq_puts(filp, "rq context is not initialized\n");
1773 	else
1774 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1775 				     "rq");
1776 
1777 	if (!pfvf->sq_ctx)
1778 		seq_puts(filp, "sq context is not initialized\n");
1779 	else
1780 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1781 				     "sq");
1782 }
1783 
/* debugfs ->write for nix_qsize: select which NIX LF to summarize */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs seq_file show for nix_qsize */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

/* Generates rvu_dbg_nix_qsize_fops */
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1798 
1799 static void print_band_prof_ctx(struct seq_file *m,
1800 				struct nix_bandprof_s *prof)
1801 {
1802 	char *str;
1803 
1804 	switch (prof->pc_mode) {
1805 	case NIX_RX_PC_MODE_VLAN:
1806 		str = "VLAN";
1807 		break;
1808 	case NIX_RX_PC_MODE_DSCP:
1809 		str = "DSCP";
1810 		break;
1811 	case NIX_RX_PC_MODE_GEN:
1812 		str = "Generic";
1813 		break;
1814 	case NIX_RX_PC_MODE_RSVD:
1815 		str = "Reserved";
1816 		break;
1817 	}
1818 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1819 	str = (prof->icolor == 3) ? "Color blind" :
1820 		(prof->icolor == 0) ? "Green" :
1821 		(prof->icolor == 1) ? "Yellow" : "Red";
1822 	seq_printf(m, "W0: icolor\t\t%s\n", str);
1823 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1824 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1825 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1826 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1827 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1828 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1829 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1830 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1831 
1832 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1833 	str = (prof->lmode == 0) ? "byte" : "packet";
1834 	seq_printf(m, "W1: lmode\t\t%s\n", str);
1835 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1836 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1837 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1838 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1839 	str = (prof->gc_action == 0) ? "PASS" :
1840 		(prof->gc_action == 1) ? "DROP" : "RED";
1841 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
1842 	str = (prof->yc_action == 0) ? "PASS" :
1843 		(prof->yc_action == 1) ? "DROP" : "RED";
1844 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
1845 	str = (prof->rc_action == 0) ? "PASS" :
1846 		(prof->rc_action == 1) ? "DROP" : "RED";
1847 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
1848 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1849 	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1850 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1851 
1852 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1853 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1854 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1855 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1856 		   (u64)prof->green_pkt_pass);
1857 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1858 		   (u64)prof->yellow_pkt_pass);
1859 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1860 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
1861 		   (u64)prof->green_octs_pass);
1862 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1863 		   (u64)prof->yellow_octs_pass);
1864 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1865 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1866 		   (u64)prof->green_pkt_drop);
1867 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1868 		   (u64)prof->yellow_pkt_drop);
1869 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1870 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
1871 		   (u64)prof->green_octs_drop);
1872 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1873 		   (u64)prof->yellow_octs_drop);
1874 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1875 	seq_puts(m, "==============================\n");
1876 }
1877 
/* debugfs "ingress_policer_ctx": walk every policer layer and dump the
 * context of each allocated bandwidth profile via the NIX admin queue.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only dump profiles that are currently allocated */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index: bits 0-13 = profile, 14-15 = layer */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				/* Stop dumping but still return 0 so the read
				 * shows whatever was printed so far.
				 */
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* FUNC bits zero means the owner is a PF, else a VF */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1934 
1935 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1936 {
1937 	struct nix_hw *nix_hw = m->private;
1938 	struct nix_ipolicer *ipolicer;
1939 	int layer;
1940 	char *str;
1941 
1942 	/* Ingress policers do not exist on all platforms */
1943 	if (!nix_hw->ipolicer)
1944 		return 0;
1945 
1946 	seq_puts(m, "\nBandwidth profile resource free count\n");
1947 	seq_puts(m, "=====================================\n");
1948 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1949 		if (layer == BAND_PROF_INVAL_LAYER)
1950 			continue;
1951 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1952 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1953 
1954 		ipolicer = &nix_hw->ipolicer[layer];
1955 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1956 			   ipolicer->band_prof.max,
1957 			   rvu_rsrc_free_count(&ipolicer->band_prof));
1958 	}
1959 	seq_puts(m, "=====================================\n");
1960 
1961 	return 0;
1962 }
1963 
1964 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1965 
1966 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1967 {
1968 	struct nix_hw *nix_hw;
1969 
1970 	if (!is_block_implemented(rvu->hw, blkaddr))
1971 		return;
1972 
1973 	if (blkaddr == BLKADDR_NIX0) {
1974 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1975 		nix_hw = &rvu->hw->nix[0];
1976 	} else {
1977 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1978 						      rvu->rvu_dbg.root);
1979 		nix_hw = &rvu->hw->nix[1];
1980 	}
1981 
1982 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1983 			    &rvu_dbg_nix_sq_ctx_fops);
1984 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1985 			    &rvu_dbg_nix_rq_ctx_fops);
1986 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1987 			    &rvu_dbg_nix_cq_ctx_fops);
1988 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1989 			    &rvu_dbg_nix_ndc_tx_cache_fops);
1990 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1991 			    &rvu_dbg_nix_ndc_rx_cache_fops);
1992 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1993 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1994 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1995 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1996 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1997 			    &rvu_dbg_nix_qsize_fops);
1998 	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1999 			    &rvu_dbg_nix_band_prof_ctx_fops);
2000 	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2001 			    &rvu_dbg_nix_band_prof_rsrc_fops);
2002 }
2003 
2004 static void rvu_dbg_npa_init(struct rvu *rvu)
2005 {
2006 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2007 
2008 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2009 			    &rvu_dbg_npa_qsize_fops);
2010 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2011 			    &rvu_dbg_npa_aura_ctx_fops);
2012 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2013 			    &rvu_dbg_npa_pool_ctx_fops);
2014 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2015 			    &rvu_dbg_npa_ndc_cache_fops);
2016 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2017 			    &rvu_dbg_npa_ndc_hits_miss_fops);
2018 }
2019 
/* Print one cumulative NIX RX stat for a CGX LMAC and evaluate to the
 * counter value.  Expects 'rvu', 'cgxd', 'lmac_id', 's' (seq_file) and
 * 'err' to be in scope at the expansion site; 'err' is set so the
 * caller can bail out after the expansion.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2029 
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: print one cumulative
 * NIX TX stat for a CGX LMAC and evaluate to the counter value.  Same
 * implicit dependencies on 'rvu', 'cgxd', 'lmac_id', 's' and 'err'.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2039 
2040 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2041 {
2042 	struct cgx_link_user_info linfo;
2043 	struct mac_ops *mac_ops;
2044 	void *cgxd = s->private;
2045 	u64 ucast, mcast, bcast;
2046 	int stat = 0, err = 0;
2047 	u64 tx_stat, rx_stat;
2048 	struct rvu *rvu;
2049 
2050 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2051 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2052 	if (!rvu)
2053 		return -ENODEV;
2054 
2055 	mac_ops = get_mac_ops(cgxd);
2056 	/* There can be no CGX devices at all */
2057 	if (!mac_ops)
2058 		return 0;
2059 
2060 	/* Link status */
2061 	seq_puts(s, "\n=======Link Status======\n\n");
2062 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2063 	if (err)
2064 		seq_puts(s, "Failed to read link status\n");
2065 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
2066 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2067 
2068 	/* Rx stats */
2069 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2070 		   mac_ops->name);
2071 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2072 	if (err)
2073 		return err;
2074 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2075 	if (err)
2076 		return err;
2077 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2078 	if (err)
2079 		return err;
2080 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2081 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2082 	if (err)
2083 		return err;
2084 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2085 	if (err)
2086 		return err;
2087 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2088 	if (err)
2089 		return err;
2090 
2091 	/* Tx stats */
2092 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2093 		   mac_ops->name);
2094 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2095 	if (err)
2096 		return err;
2097 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2098 	if (err)
2099 		return err;
2100 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2101 	if (err)
2102 		return err;
2103 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2104 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2105 	if (err)
2106 		return err;
2107 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2108 	if (err)
2109 		return err;
2110 
2111 	/* Rx stats */
2112 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2113 	while (stat < mac_ops->rx_stats_cnt) {
2114 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2115 		if (err)
2116 			return err;
2117 		if (is_rvu_otx2(rvu))
2118 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2119 				   rx_stat);
2120 		else
2121 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2122 				   rx_stat);
2123 		stat++;
2124 	}
2125 
2126 	/* Tx stats */
2127 	stat = 0;
2128 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2129 	while (stat < mac_ops->tx_stats_cnt) {
2130 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2131 		if (err)
2132 			return err;
2133 
2134 		if (is_rvu_otx2(rvu))
2135 			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2136 				   tx_stat);
2137 		else
2138 			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2139 				   tx_stat);
2140 		stat++;
2141 	}
2142 
2143 	return err;
2144 }
2145 
2146 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2147 {
2148 	struct dentry *current_dir;
2149 	char *buf;
2150 
2151 	current_dir = filp->file->f_path.dentry->d_parent;
2152 	buf = strrchr(current_dir->d_name.name, 'c');
2153 	if (!buf)
2154 		return -EINVAL;
2155 
2156 	return kstrtoint(buf + 1, 10, lmac_id);
2157 }
2158 
/* debugfs "stats" read handler: resolve the LMAC index from the
 * directory name and print that LMAC's statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2171 
2172 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2173 {
2174 	struct pci_dev *pdev = NULL;
2175 	void *cgxd = s->private;
2176 	char *bcast, *mcast;
2177 	u16 index, domain;
2178 	u8 dmac[ETH_ALEN];
2179 	struct rvu *rvu;
2180 	u64 cfg, mac;
2181 	int pf;
2182 
2183 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2184 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2185 	if (!rvu)
2186 		return -ENODEV;
2187 
2188 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2189 	domain = 2;
2190 
2191 	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2192 	if (!pdev)
2193 		return 0;
2194 
2195 	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2196 	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2197 	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2198 
2199 	seq_puts(s,
2200 		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2201 	seq_printf(s, "%s  PF%d  %9s  %9s",
2202 		   dev_name(&pdev->dev), pf, bcast, mcast);
2203 	if (cfg & CGX_DMAC_CAM_ACCEPT)
2204 		seq_printf(s, "%12s\n\n", "UNICAST");
2205 	else
2206 		seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2207 
2208 	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2209 
2210 	for (index = 0 ; index < 32 ; index++) {
2211 		cfg = cgx_read_dmac_entry(cgxd, index);
2212 		/* Display enabled dmac entries associated with current lmac */
2213 		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2214 		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2215 			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2216 			u64_to_ether_addr(mac, dmac);
2217 			seq_printf(s, "%7d     %pM\n", index, dmac);
2218 		}
2219 	}
2220 
2221 	return 0;
2222 }
2223 
/* debugfs "mac_filter" read handler: resolve the LMAC index from the
 * directory name and print that LMAC's DMAC filter table.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int err, lmac_id;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2236 
/* Build the CGX/RPM debugfs tree: one directory per MAC block, with a
 * sub-directory per LMAC containing "stats" and "mac_filter" files.
 */
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	unsigned long lmac_bmap;
	int i, lmac_id;
	/* NOTE(review): assumes mac_ops->name plus a numeric suffix fits
	 * in 20 bytes - presumably short names like "cgx"/"rpm"; confirm.
	 */
	char dname[20];
	void *cgx;

	if (!cgx_get_cgxcnt_max())
		return;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	if (!mac_ops)
		return;

	/* Root directory is named after the MAC type ("cgx" or "rpm") */
	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
						   rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgx);
		/* cgx debugfs dir */
		sprintf(dname, "%s%d", mac_ops->name, i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);

		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
					    cgx, &rvu_dbg_cgx_stat_fops);
			debugfs_create_file("mac_filter", 0600,
					    rvu->rvu_dbg.lmac, cgx,
					    &rvu_dbg_cgx_dmac_flt_fops);
		}
	}
}
2279 
2280 /* NPC debugfs APIs */
/* NPC debugfs APIs */
/* Print MCAM entry and counter allocation counts for one PF/VF.
 * Prints nothing if the function owns no entries and no counters.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
				    u16 pcifunc, int blkaddr)
{
	struct rvu *rvu = s->private;
	int entry_acnt, entry_ecnt;
	int cntr_acnt, cntr_ecnt;

	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
					  &entry_acnt, &entry_ecnt);
	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
					    &cntr_acnt, &cntr_ecnt);
	/* Skip functions with nothing allocated to keep the output short */
	if (!entry_acnt && !cntr_acnt)
		return;

	/* FUNC bits zero means the owner is a PF, else a VF */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
			   rvu_get_pf(pcifunc));
	else
		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
			   rvu_get_pf(pcifunc),
			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);

	if (entry_acnt) {
		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
	}
	if (cntr_acnt) {
		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
	}
}
2312 
/* debugfs "mcam_info": key widths, MCAM entry/counter totals and the
 * per-PF/VF allocation breakdown.  mcam->lock is held while reading
 * the bookkeeping fields and walking the PF/VF allocations.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	/* Key width is encoded in bits 32-34 of the KEX config */
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* Nothing allocated yet - skip the per-function breakdown */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of VFs enabled for this PF lives in bits 12-19 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2379 
/* debugfs "rx_miss_act_stats": print the match counter associated with
 * the RX MCAM miss action.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2402 
/* Print every match field of one MCAM rule: walk the 64-bit 'features'
 * bitmap and, per set bit, print the field name plus the packet value
 * and mask in a field-appropriate format.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			/* Packet fields are stored big-endian; convert
			 * for display.
			 */
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* Field has no printable value; terminate the line */
			seq_puts(s, "\n");
			break;
		}
	}
}
2466 
/* Print a human-readable description of an MCAM rule's action,
 * choosing the TX or RX action set based on the rule's interface.
 * Unknown action opcodes print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
2514 
/* Map an NPC interface id to a printable name; "unknown" for any id
 * outside the NIX0/NIX1 RX/TX set.
 */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2532 
/* debugfs "mcam_rules": dump every installed MCAM rule - owner, match
 * fields, action, enable state and (if attached) its hit counter.
 * mcam->lock is held across the whole walk of mcam_rules.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits mean the rule was installed by a VF */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forwarding target and channel match */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		/* Hit stats only exist for rules with a counter attached */
		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2600 
2601 static void rvu_dbg_npc_init(struct rvu *rvu)
2602 {
2603 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2604 
2605 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2606 			    &rvu_dbg_npc_mcam_info_fops);
2607 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2608 			    &rvu_dbg_npc_mcam_rules_fops);
2609 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2610 			    &rvu_dbg_npc_rx_miss_act_fops);
2611 }
2612 
/* Print busy/free status bitmaps for one class of CPT engines.
 * CPT_AF_CONSTANTS1 gives the per-type engine counts; engines are
 * numbered SE first, then IE, then AE, so the [e_min, e_max) window
 * selects the requested type.
 * NOTE(review): each status is folded into a single u64 via
 * '1ULL << i', which assumes no engine class exceeds 64 engines -
 * confirm against the hardware constants.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	/* Engine counts: SEs in bits 0-15, IEs 16-31, AEs 32-47 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		/* STS bit0 = busy, bit1 = free */
		if (reg & 0x1)
			busy_sts |= 1ULL << i;

		if (reg & 0x2)
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
2658 
/* debugfs "cpt_ae_sts": busy/free status of the AE engine class */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2665 
/* debugfs "cpt_se_sts": busy/free status of the SE engine class */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2672 
/* debugfs "cpt_ie_sts": busy/free status of the IE engine class */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2679 
/* debugfs "cpt_engines_info": for every CPT engine (SE + IE + AE) dump
 * its group-enable mask, active info and control registers.
 */
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u32 e_max, e;
	u64 reg;

	/* Engine counts: SEs in bits 0-15, IEs 16-31, AEs 32-47 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	e_max = max_ses + max_ies + max_aes;

	seq_puts(filp, "===========================================\n");
	for (e = 0; e < e_max; e++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
			   reg & 0xff);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
			   reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
			   reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2713 
/* debugfs "cpt_lfs_info": per-LF control and config registers for the
 * CPT block.  Fails with -ENODEV if the LF bitmap was never set up.
 */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
		/* Per-LF config register address is base | (lf << shift) */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				(lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2746 
/* debugfs "cpt_err_info": dump the CPT fault/poison/interrupt and
 * execution error registers.
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2773 
2774 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2775 {
2776 	struct cpt_ctx *ctx = filp->private;
2777 	struct rvu *rvu = ctx->rvu;
2778 	int blkaddr = ctx->blkaddr;
2779 	u64 reg;
2780 
2781 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2782 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2783 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2784 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2785 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2786 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2787 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2788 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2789 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2790 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2791 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2792 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2793 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2794 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2795 
2796 	return 0;
2797 }
2798 
2799 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2800 
2801 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2802 {
2803 	struct cpt_ctx *ctx;
2804 
2805 	if (!is_block_implemented(rvu->hw, blkaddr))
2806 		return;
2807 
2808 	if (blkaddr == BLKADDR_CPT0) {
2809 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2810 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
2811 		ctx->blkaddr = BLKADDR_CPT0;
2812 		ctx->rvu = rvu;
2813 	} else {
2814 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2815 						      rvu->rvu_dbg.root);
2816 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
2817 		ctx->blkaddr = BLKADDR_CPT1;
2818 		ctx->rvu = rvu;
2819 	}
2820 
2821 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2822 			    &rvu_dbg_cpt_pc_fops);
2823 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2824 			    &rvu_dbg_cpt_ae_sts_fops);
2825 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2826 			    &rvu_dbg_cpt_se_sts_fops);
2827 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2828 			    &rvu_dbg_cpt_ie_sts_fops);
2829 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2830 			    &rvu_dbg_cpt_engines_info_fops);
2831 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2832 			    &rvu_dbg_cpt_lfs_info_fops);
2833 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2834 			    &rvu_dbg_cpt_err_info_fops);
2835 }
2836 
2837 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
2838 {
2839 	if (!is_rvu_otx2(rvu))
2840 		return "cn10k";
2841 	else
2842 		return "octeontx2";
2843 }
2844 
2845 void rvu_dbg_init(struct rvu *rvu)
2846 {
2847 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2848 
2849 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2850 			    &rvu_dbg_rsrc_status_fops);
2851 
2852 	if (!is_rvu_otx2(rvu))
2853 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
2854 				    rvu, &rvu_dbg_lmtst_map_table_fops);
2855 
2856 	if (!cgx_get_cgxcnt_max())
2857 		goto create;
2858 
2859 	if (is_rvu_otx2(rvu))
2860 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2861 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2862 	else
2863 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2864 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2865 
2866 create:
2867 	rvu_dbg_npa_init(rvu);
2868 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2869 
2870 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2871 	rvu_dbg_cgx_init(rvu);
2872 	rvu_dbg_npc_init(rvu);
2873 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2874 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2875 }
2876 
/* Tear down the entire debugfs hierarchy created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2881 
2882 #endif /* CONFIG_DEBUG_FS */
2883