1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 
23 #define DEBUGFS_DIR_NAME "octeontx2"
24 
/* CGX per-LMAC statistics register indices. Used to index the
 * cgx_rx_stats_fields[] and cgx_tx_stats_fields[] name tables below;
 * RX and TX interpret the same index differently.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
46 
47 /* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,	/* transmitted unicast packets */
	TX_BCAST	= 0x1,	/* transmitted broadcast packets */
	TX_MCAST	= 0x2,	/* transmitted multicast packets */
	TX_DROP		= 0x3,	/* packets dropped on transmit */
	TX_OCTS		= 0x4,	/* transmitted octets */
	TX_STATS_ENUM_LAST,	/* count of TX stats; not a register index */
};
56 
57 /* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,	/* received octets */
	RX_UCAST	= 0x1,	/* received unicast packets */
	RX_BCAST	= 0x2,	/* received broadcast packets */
	RX_MCAST	= 0x3,	/* received multicast packets */
	RX_DROP		= 0x4,	/* dropped packets */
	RX_DROP_OCTS	= 0x5,	/* octets of dropped packets */
	RX_FCS		= 0x6,	/* frame check sequence errors */
	RX_ERR		= 0x7,	/* other receive errors */
	RX_DRP_BCAST	= 0x8,	/* dropped broadcast packets */
	RX_DRP_MCAST	= 0x9,	/* dropped multicast packets */
	RX_DRP_L3BCAST	= 0xa,	/* dropped L3 broadcast packets */
	RX_DRP_L3MCAST	= 0xb,	/* dropped L3 multicast packets */
	RX_STATS_ENUM_LAST,	/* count of RX stats; not a register index */
};
73 
/* Human-readable labels for CGX RX stats, indexed by the CGX_STATx enum.
 * Printed verbatim to the per-LMAC debugfs stats files.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
89 
/* Human-readable labels for CGX TX stats, indexed by the CGX_STATx enum.
 * Printed verbatim to the per-LMAC debugfs stats files.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
110 
/* Human-readable labels for RPM (CN10K MAC) RX stats, in hardware counter
 * order. Printed verbatim to the per-LMAC debugfs stats files.
 *
 * Fixes two typos in the displayed labels: "with out error" -> "without
 * error" and "a1nrange" -> "InRange" (the IEEE 802.3 aInRangeLengthErrors
 * counter).
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with InRange length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
156 
/* Human-readable labels for RPM (CN10K MAC) TX stats, in hardware counter
 * order. Printed verbatim to the per-LMAC debugfs stats files.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
193 
/* CPT (crypto accelerator) engine types */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,	/* asymmetric engine */
	CPT_SE_TYPE = 2,	/* symmetric engine */
	CPT_IE_TYPE = 3,	/* IPsec engine */
};
199 
/* Number of NDC banks, read from the NDC block's constants register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Stubs so the FOPS generators below can be invoked with NULL for a
 * missing read or write handler: rvu_dbg_##NULL expands to plain NULL.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Generate a seq_file-based struct file_operations named
 * rvu_dbg_<name>_fops, wiring rvu_dbg_<read_op> as the seq show callback
 * and rvu_dbg_<write_op> as the raw write handler.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Generate a raw (non-seq_file) struct file_operations named
 * rvu_dbg_<name>_fops for handlers that do their own buffering.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
227 
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);

/* Size in bytes of one LMTST map table entry (two 64-bit words) */
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
232 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
233 					       char __user *buffer,
234 					       size_t count, loff_t *ppos)
235 {
236 	struct rvu *rvu = filp->private_data;
237 	u64 lmt_addr, val, tbl_base;
238 	int pf, vf, num_vfs, hw_vfs;
239 	void __iomem *lmt_map_base;
240 	int buf_size = 10240;
241 	size_t off = 0;
242 	int index = 0;
243 	char *buf;
244 	int ret;
245 
246 	/* don't allow partial reads */
247 	if (*ppos != 0)
248 		return 0;
249 
250 	buf = kzalloc(buf_size, GFP_KERNEL);
251 	if (!buf)
252 		return -ENOMEM;
253 
254 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
255 
256 	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
257 	if (!lmt_map_base) {
258 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
259 		kfree(buf);
260 		return false;
261 	}
262 
263 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
264 			  "\n\t\t\t\t\tLmtst Map Table Entries");
265 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
266 			  "\n\t\t\t\t\t=======================");
267 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
268 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
269 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
270 			  "Lmtline Base (word 0)\t\t");
271 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
272 			  "Lmt Map Entry (word 1)");
273 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
274 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
275 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
276 				    pf);
277 
278 		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
279 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
280 				 (tbl_base + index));
281 		lmt_addr = readq(lmt_map_base + index);
282 		off += scnprintf(&buf[off], buf_size - 1 - off,
283 				 " 0x%016llx\t\t", lmt_addr);
284 		index += 8;
285 		val = readq(lmt_map_base + index);
286 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
287 				 val);
288 		/* Reading num of VFs per PF */
289 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
290 		for (vf = 0; vf < num_vfs; vf++) {
291 			index = (pf * rvu->hw->total_vfs * 16) +
292 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
293 			off += scnprintf(&buf[off], buf_size - 1 - off,
294 					    "PF%d:VF%d  \t\t", pf, vf);
295 			off += scnprintf(&buf[off], buf_size - 1 - off,
296 					 " 0x%llx\t\t", (tbl_base + index));
297 			lmt_addr = readq(lmt_map_base + index);
298 			off += scnprintf(&buf[off], buf_size - 1 - off,
299 					 " 0x%016llx\t\t", lmt_addr);
300 			index += 8;
301 			val = readq(lmt_map_base + index);
302 			off += scnprintf(&buf[off], buf_size - 1 - off,
303 					 " 0x%016llx\n", val);
304 		}
305 	}
306 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
307 
308 	ret = min(off, count);
309 	if (copy_to_user(buffer, buf, ret))
310 		ret = -EFAULT;
311 	kfree(buf);
312 
313 	iounmap(lmt_map_base);
314 	if (ret < 0)
315 		return ret;
316 
317 	*ppos = ret;
318 	return ret;
319 }
320 
321 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
322 
/* Render the list of LFs in 'block' that are attached to 'pcifunc' into
 * 'lfs' as a comma-separated string, compressing consecutive runs into
 * ranges, e.g. "0-3,7". The caller must size 'lfs' large enough for the
 * result (see get_max_column_width()). 'block' is passed by value; only
 * its embedded bitmap and fn_map pointers are read.
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts out of range so the first match never extends a run */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* extends the current run; defer printing */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* close the pending run, then start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			/* comma-separate all entries after the first */
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* bitmap ended while inside a run; close it */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
356 
357 static int get_max_column_width(struct rvu *rvu)
358 {
359 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
360 	struct rvu_block block;
361 	u16 pcifunc;
362 	char *buf;
363 
364 	buf = kzalloc(buf_size, GFP_KERNEL);
365 	if (!buf)
366 		return -ENOMEM;
367 
368 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
369 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
370 			pcifunc = pf << 10 | vf;
371 			if (!pcifunc)
372 				continue;
373 
374 			for (index = 0; index < BLK_COUNT; index++) {
375 				block = rvu->hw->block[index];
376 				if (!strlen(block.name))
377 					continue;
378 
379 				get_lf_str_list(block, pcifunc, buf);
380 				if (lf_str_size <= strlen(buf))
381 					lf_str_size = strlen(buf) + 1;
382 			}
383 		}
384 	}
385 
386 	kfree(buf);
387 	return lf_str_size;
388 }
389 
390 /* Dumps current provisioning status of all RVU block LFs */
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0, i = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	/* Header row: "pcifunc" plus one column per implemented block */
	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			  "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	/* The table is streamed to userspace one row at a time; 'i'
	 * counts rows already copied and 'off' is reset per row, so the
	 * destination offset is buffer + (i * off). *ppos accumulates the
	 * total bytes written and is also the return value.
	 */
	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
	if (bytes_not_copied)
		goto out;

	i++;
	*ppos += off;
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = pf << 10 | vf;
			/* pcifunc 0 is the AF itself; skip it */
			if (!pcifunc)
				continue;

			/* vf == 0 denotes the PF row; VF numbering in the
			 * pcifunc is 1-based, hence "VF%d" with vf - 1.
			 */
			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(block, pcifunc, lfs);
				/* 'flag' records whether this pcifunc has
				 * any LF attached; rows without any are
				 * not emitted.
				 */
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			if (flag) {
				off +=	scnprintf(&buf[off],
						  buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
				if (bytes_not_copied)
					goto out;

				i++;
				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}
493 
494 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
495 
496 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
497 {
498 	struct rvu *rvu = filp->private;
499 	struct pci_dev *pdev = NULL;
500 	struct mac_ops *mac_ops;
501 	char cgx[10], lmac[10];
502 	struct rvu_pfvf *pfvf;
503 	int pf, domain, blkid;
504 	u8 cgx_id, lmac_id;
505 	u16 pcifunc;
506 
507 	domain = 2;
508 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
509 	/* There can be no CGX devices at all */
510 	if (!mac_ops)
511 		return 0;
512 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
513 		   mac_ops->name);
514 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
515 		if (!is_pf_cgxmapped(rvu, pf))
516 			continue;
517 
518 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
519 		if (!pdev)
520 			continue;
521 
522 		cgx[0] = 0;
523 		lmac[0] = 0;
524 		pcifunc = pf << 10;
525 		pfvf = rvu_get_pfvf(rvu, pcifunc);
526 
527 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
528 			blkid = 0;
529 		else
530 			blkid = 1;
531 
532 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
533 				    &lmac_id);
534 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
535 		sprintf(lmac, "LMAC%d", lmac_id);
536 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
537 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
538 	}
539 	return 0;
540 }
541 
542 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
543 
544 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
545 				u16 *pcifunc)
546 {
547 	struct rvu_block *block;
548 	struct rvu_hwinfo *hw;
549 
550 	hw = rvu->hw;
551 	block = &hw->block[blkaddr];
552 
553 	if (lf < 0 || lf >= block->lf.max) {
554 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
555 			 block->lf.max - 1);
556 		return false;
557 	}
558 
559 	*pcifunc = block->fn_map[lf];
560 	if (!*pcifunc) {
561 		dev_warn(rvu->dev,
562 			 "This LF is not attached to any RVU PFFUNC\n");
563 		return false;
564 	}
565 	return true;
566 }
567 
568 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
569 {
570 	char *buf;
571 
572 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
573 	if (!buf)
574 		return;
575 
576 	if (!pfvf->aura_ctx) {
577 		seq_puts(m, "Aura context is not initialized\n");
578 	} else {
579 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
580 					pfvf->aura_ctx->qsize);
581 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
582 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
583 	}
584 
585 	if (!pfvf->pool_ctx) {
586 		seq_puts(m, "Pool context is not initialized\n");
587 	} else {
588 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
589 					pfvf->pool_ctx->qsize);
590 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
591 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
592 	}
593 	kfree(buf);
594 }
595 
596 /* The 'qsize' entry dumps current Aura/Pool context Qsize
597  * and each context's current enable/disable status in a bitmap.
598  */
599 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
600 				 int blktype)
601 {
602 	void (*print_qsize)(struct seq_file *filp,
603 			    struct rvu_pfvf *pfvf) = NULL;
604 	struct dentry *current_dir;
605 	struct rvu_pfvf *pfvf;
606 	struct rvu *rvu;
607 	int qsize_id;
608 	u16 pcifunc;
609 	int blkaddr;
610 
611 	rvu = filp->private;
612 	switch (blktype) {
613 	case BLKTYPE_NPA:
614 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
615 		print_qsize = print_npa_qsize;
616 		break;
617 
618 	case BLKTYPE_NIX:
619 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
620 		print_qsize = print_nix_qsize;
621 		break;
622 
623 	default:
624 		return -EINVAL;
625 	}
626 
627 	if (blktype == BLKTYPE_NPA) {
628 		blkaddr = BLKADDR_NPA;
629 	} else {
630 		current_dir = filp->file->f_path.dentry->d_parent;
631 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
632 				   BLKADDR_NIX1 : BLKADDR_NIX0);
633 	}
634 
635 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
636 		return -EINVAL;
637 
638 	pfvf = rvu_get_pfvf(rvu, pcifunc);
639 	print_qsize(filp, pfvf);
640 
641 	return 0;
642 }
643 
644 static ssize_t rvu_dbg_qsize_write(struct file *filp,
645 				   const char __user *buffer, size_t count,
646 				   loff_t *ppos, int blktype)
647 {
648 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
649 	struct seq_file *seqfile = filp->private_data;
650 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
651 	struct rvu *rvu = seqfile->private;
652 	struct dentry *current_dir;
653 	int blkaddr;
654 	u16 pcifunc;
655 	int ret, lf;
656 
657 	cmd_buf = memdup_user(buffer, count + 1);
658 	if (IS_ERR(cmd_buf))
659 		return -ENOMEM;
660 
661 	cmd_buf[count] = '\0';
662 
663 	cmd_buf_tmp = strchr(cmd_buf, '\n');
664 	if (cmd_buf_tmp) {
665 		*cmd_buf_tmp = '\0';
666 		count = cmd_buf_tmp - cmd_buf + 1;
667 	}
668 
669 	cmd_buf_tmp = cmd_buf;
670 	subtoken = strsep(&cmd_buf, " ");
671 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
672 	if (cmd_buf)
673 		ret = -EINVAL;
674 
675 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
676 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
677 		goto qsize_write_done;
678 	}
679 
680 	if (blktype == BLKTYPE_NPA) {
681 		blkaddr = BLKADDR_NPA;
682 	} else {
683 		current_dir = filp->f_path.dentry->d_parent;
684 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
685 				   BLKADDR_NIX1 : BLKADDR_NIX0);
686 	}
687 
688 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
689 		ret = -EINVAL;
690 		goto qsize_write_done;
691 	}
692 	if (blktype  == BLKTYPE_NPA)
693 		rvu->rvu_dbg.npa_qsize_id = lf;
694 	else
695 		rvu->rvu_dbg.nix_qsize_id = lf;
696 
697 qsize_write_done:
698 	kfree(cmd_buf_tmp);
699 	return ret ? ret : count;
700 }
701 
702 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
703 				       const char __user *buffer,
704 				       size_t count, loff_t *ppos)
705 {
706 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
707 					    BLKTYPE_NPA);
708 }
709 
710 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
711 {
712 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
713 }
714 
715 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
716 
717 /* Dumps given NPA Aura's context */
/* Pretty-print every field of an NPA aura context, grouped by the 64-bit
 * context word (W0..W6) each field lives in. Fields guarded by
 * !is_rvu_otx2() exist only on silicon newer than OcteonTx2 (CN10K).
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is a CN10K-only field */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is a CN10K-only field */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
764 
765 /* Dumps given NPA Pool's context */
/* Pretty-print every field of an NPA pool context, grouped by the 64-bit
 * context word (W0..W8) each field lives in. Fields guarded by
 * !is_rvu_otx2() exist only on silicon newer than OcteonTx2 (CN10K).
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is a CN10K-only field */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is a CN10K-only field */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
811 
812 /* Reads aura/pool's ctx from admin queue */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Pick up the LF/index/"all" selection last stored by the matching
	 * write handler (see write_npa_ctx()).
	 */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ request; the same request is reused
	 * for every index, only aura_id changes per iteration.
	 */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps indices [0, qsize); otherwise just [id, id + 1) */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
892 
893 static int write_npa_ctx(struct rvu *rvu, bool all,
894 			 int npalf, int id, int ctype)
895 {
896 	struct rvu_pfvf *pfvf;
897 	int max_id = 0;
898 	u16 pcifunc;
899 
900 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
901 		return -EINVAL;
902 
903 	pfvf = rvu_get_pfvf(rvu, pcifunc);
904 
905 	if (ctype == NPA_AQ_CTYPE_AURA) {
906 		if (!pfvf->aura_ctx) {
907 			dev_warn(rvu->dev, "Aura context is not initialized\n");
908 			return -EINVAL;
909 		}
910 		max_id = pfvf->aura_ctx->qsize;
911 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
912 		if (!pfvf->pool_ctx) {
913 			dev_warn(rvu->dev, "Pool context is not initialized\n");
914 			return -EINVAL;
915 		}
916 		max_id = pfvf->pool_ctx->qsize;
917 	}
918 
919 	if (id < 0 || id >= max_id) {
920 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
921 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
922 			max_id - 1);
923 		return -EINVAL;
924 	}
925 
926 	switch (ctype) {
927 	case NPA_AQ_CTYPE_AURA:
928 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
929 		rvu->rvu_dbg.npa_aura_ctx.id = id;
930 		rvu->rvu_dbg.npa_aura_ctx.all = all;
931 		break;
932 
933 	case NPA_AQ_CTYPE_POOL:
934 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
935 		rvu->rvu_dbg.npa_pool_ctx.id = id;
936 		rvu->rvu_dbg.npa_pool_ctx.all = all;
937 		break;
938 	default:
939 		return -EINVAL;
940 	}
941 	return 0;
942 }
943 
/* Parse a "<npalf> [<id>|all]" command written to an aura/pool debugfs
 * file. 'cmd_buf' must have room for *count + 1 bytes; *count may be
 * shrunk to the portion before the first newline. On success fills
 * *npalf and either *all (for "all") or *id. Returns 0 on success or a
 * negative errno. Note: strsep() advances the local 'cmd_buf' pointer;
 * the caller keeps its own copy for kfree().
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	/* truncate at the first newline */
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* first token: the NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* optional second token: an index, or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* any trailing tokens make the command invalid */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
981 
982 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
983 				     const char __user *buffer,
984 				     size_t count, loff_t *ppos, int ctype)
985 {
986 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
987 					"aura" : "pool";
988 	struct seq_file *seqfp = filp->private_data;
989 	struct rvu *rvu = seqfp->private;
990 	int npalf, id = 0, ret;
991 	bool all = false;
992 
993 	if ((*ppos != 0) || !count)
994 		return -EINVAL;
995 
996 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
997 	if (!cmd_buf)
998 		return count;
999 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1000 				   &npalf, &id, &all);
1001 	if (ret < 0) {
1002 		dev_info(rvu->dev,
1003 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1004 			 ctype_string, ctype_string);
1005 		goto done;
1006 	} else {
1007 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1008 	}
1009 done:
1010 	kfree(cmd_buf);
1011 	return ret ? ret : count;
1012 }
1013 
/* debugfs writer for npa_aura_ctx: forwards to the common NPA context
 * write handler with the AURA context type.
 */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}

/* debugfs reader for npa_aura_ctx: dumps the aura context(s) selected by
 * the most recent write to this file.
 */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1028 
/* debugfs writer for npa_pool_ctx: forwards to the common NPA context
 * write handler with the POOL context type.
 */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}

/* debugfs reader for npa_pool_ctx: dumps the pool context(s) selected by
 * the most recent write to this file.
 */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1043 
1044 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1045 			    int ctype, int transaction)
1046 {
1047 	u64 req, out_req, lat, cant_alloc;
1048 	struct nix_hw *nix_hw;
1049 	struct rvu *rvu;
1050 	int port;
1051 
1052 	if (blk_addr == BLKADDR_NDC_NPA0) {
1053 		rvu = s->private;
1054 	} else {
1055 		nix_hw = s->private;
1056 		rvu = nix_hw->rvu;
1057 	}
1058 
1059 	for (port = 0; port < NDC_MAX_PORT; port++) {
1060 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1061 						(port, ctype, transaction));
1062 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1063 						(port, ctype, transaction));
1064 		out_req = rvu_read64(rvu, blk_addr,
1065 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1066 				     (port, ctype, transaction));
1067 		cant_alloc = rvu_read64(rvu, blk_addr,
1068 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1069 					(port, transaction));
1070 		seq_printf(s, "\nPort:%d\n", port);
1071 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1072 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1073 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1074 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1075 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1076 	}
1077 }
1078 
1079 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1080 {
1081 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1082 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1083 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1084 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1085 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1086 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1087 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1088 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1089 	return 0;
1090 }
1091 
/* debugfs reader for npa_ndc_cache: NPA NDC cache/bypass transaction stats */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1098 
1099 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1100 {
1101 	struct nix_hw *nix_hw;
1102 	struct rvu *rvu;
1103 	int bank, max_bank;
1104 
1105 	if (blk_addr == BLKADDR_NDC_NPA0) {
1106 		rvu = s->private;
1107 	} else {
1108 		nix_hw = s->private;
1109 		rvu = nix_hw->rvu;
1110 	}
1111 
1112 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
1113 	for (bank = 0; bank < max_bank; bank++) {
1114 		seq_printf(s, "BANK:%d\n", bank);
1115 		seq_printf(s, "\tHits:\t%lld\n",
1116 			   (u64)rvu_read64(rvu, blk_addr,
1117 			   NDC_AF_BANKX_HIT_PC(bank)));
1118 		seq_printf(s, "\tMiss:\t%lld\n",
1119 			   (u64)rvu_read64(rvu, blk_addr,
1120 			    NDC_AF_BANKX_MISS_PC(bank)));
1121 	}
1122 	return 0;
1123 }
1124 
1125 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1126 {
1127 	struct nix_hw *nix_hw = filp->private;
1128 	int blkaddr = 0;
1129 	int ndc_idx = 0;
1130 
1131 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1132 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1133 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1134 
1135 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1136 }
1137 
1138 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1139 
1140 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1141 {
1142 	struct nix_hw *nix_hw = filp->private;
1143 	int blkaddr = 0;
1144 	int ndc_idx = 0;
1145 
1146 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1147 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1148 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1149 
1150 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1151 }
1152 
1153 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1154 
/* debugfs reader for npa_ndc_hits_miss: per-bank hit/miss stats of NPA NDC */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1162 
1163 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1164 						void *unused)
1165 {
1166 	struct nix_hw *nix_hw = filp->private;
1167 	int ndc_idx = NPA0_U;
1168 	int blkaddr = 0;
1169 
1170 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1171 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1172 
1173 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1174 }
1175 
1176 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1177 
1178 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1179 						void *unused)
1180 {
1181 	struct nix_hw *nix_hw = filp->private;
1182 	int ndc_idx = NPA0_U;
1183 	int blkaddr = 0;
1184 
1185 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1186 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1187 
1188 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1189 }
1190 
1191 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1192 
1193 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1194 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1195 {
1196 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1197 		   sq_ctx->ena, sq_ctx->qint_idx);
1198 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1199 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1200 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1201 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1202 
1203 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1204 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1205 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1206 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1207 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1208 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1209 
1210 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1211 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1212 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1213 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1214 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1215 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1216 
1217 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1218 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1219 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1220 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1221 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1222 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1223 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1224 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1225 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1226 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1227 
1228 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1229 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1230 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1231 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1232 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1233 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1234 		   sq_ctx->smenq_next_sqb);
1235 
1236 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1237 
1238 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1239 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1240 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1241 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1242 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1243 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1244 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1245 
1246 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1247 		   (u64)sq_ctx->scm_lso_rem);
1248 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1249 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1250 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1251 		   (u64)sq_ctx->dropped_octs);
1252 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1253 		   (u64)sq_ctx->dropped_pkts);
1254 }
1255 
/* Dumps given nix_sq's context word (W0..W15) by word.  On CN10K silicon
 * the context layout differs, so delegate to the CN10K-specific printer.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OTx2 (CN10K) parts reuse this response buffer with a
	 * different layout, hence the cast.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8 are SQB pointers, printed as raw 64-bit addresses */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10..W15 are statistics counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1325 
1326 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1327 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1328 {
1329 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1330 		   rq_ctx->ena, rq_ctx->sso_ena);
1331 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1332 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1333 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1334 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1335 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1336 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1337 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1338 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1339 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1340 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1341 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1342 
1343 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1344 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1345 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1346 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1347 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1348 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1349 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1350 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1351 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1352 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1353 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1354 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1355 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1356 
1357 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1358 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1359 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1360 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1361 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1362 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1363 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1364 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1365 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1366 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1367 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1368 
1369 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1370 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1371 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1372 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1373 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1374 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1375 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1376 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1377 
1378 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1379 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1380 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1381 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1382 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1383 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1384 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1385 
1386 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1387 		   rq_ctx->ltag, rq_ctx->good_utag);
1388 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1389 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1390 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1391 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1392 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1393 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1394 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1395 
1396 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1397 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1398 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1399 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1400 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1401 }
1402 
/* Dumps given nix_rq's context word (W0..W10) by word.  On CN10K silicon
 * the context layout differs, so delegate to the CN10K-specific printer.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OTx2 (CN10K) parts reuse this response buffer with a
	 * different layout, hence the cast.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* W3/W4 hold the admission (pass) and drop thresholds */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6..W10 are statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1471 
/* Dumps given nix_cq's context word (W0..W3) by word.  The CQ context
 * layout is shared across silicon revisions, so no CN10K variant exists.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	/* W0 is the CQ ring base IOVA */
	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1502 
/* Dump NIX CQ/SQ/RQ context(s) for the LF and queue id (or "all") that were
 * selected by the most recent write to the corresponding debugfs file.
 * Contexts are fetched from hardware through the NIX admin queue (AQ)
 * mailbox handler and printed with the matching per-type printer.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the lf/id/all selection stored by the last debugfs write */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Select queue count, label and printer for the context type */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" walks every queue of the LF; otherwise dump only 'id' */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1591 
1592 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1593 			       int id, int ctype, char *ctype_string,
1594 			       struct seq_file *m)
1595 {
1596 	struct nix_hw *nix_hw = m->private;
1597 	struct rvu_pfvf *pfvf;
1598 	int max_id = 0;
1599 	u16 pcifunc;
1600 
1601 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1602 		return -EINVAL;
1603 
1604 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1605 
1606 	if (ctype == NIX_AQ_CTYPE_SQ) {
1607 		if (!pfvf->sq_ctx) {
1608 			dev_warn(rvu->dev, "SQ context is not initialized\n");
1609 			return -EINVAL;
1610 		}
1611 		max_id = pfvf->sq_ctx->qsize;
1612 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1613 		if (!pfvf->rq_ctx) {
1614 			dev_warn(rvu->dev, "RQ context is not initialized\n");
1615 			return -EINVAL;
1616 		}
1617 		max_id = pfvf->rq_ctx->qsize;
1618 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1619 		if (!pfvf->cq_ctx) {
1620 			dev_warn(rvu->dev, "CQ context is not initialized\n");
1621 			return -EINVAL;
1622 		}
1623 		max_id = pfvf->cq_ctx->qsize;
1624 	}
1625 
1626 	if (id < 0 || id >= max_id) {
1627 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1628 			 ctype_string, max_id - 1);
1629 		return -EINVAL;
1630 	}
1631 	switch (ctype) {
1632 	case NIX_AQ_CTYPE_CQ:
1633 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1634 		rvu->rvu_dbg.nix_cq_ctx.id = id;
1635 		rvu->rvu_dbg.nix_cq_ctx.all = all;
1636 		break;
1637 
1638 	case NIX_AQ_CTYPE_SQ:
1639 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1640 		rvu->rvu_dbg.nix_sq_ctx.id = id;
1641 		rvu->rvu_dbg.nix_sq_ctx.all = all;
1642 		break;
1643 
1644 	case NIX_AQ_CTYPE_RQ:
1645 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1646 		rvu->rvu_dbg.nix_rq_ctx.id = id;
1647 		rvu->rvu_dbg.nix_rq_ctx.all = all;
1648 		break;
1649 	default:
1650 		return -EINVAL;
1651 	}
1652 	return 0;
1653 }
1654 
1655 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1656 					   const char __user *buffer,
1657 					   size_t count, loff_t *ppos,
1658 					   int ctype)
1659 {
1660 	struct seq_file *m = filp->private_data;
1661 	struct nix_hw *nix_hw = m->private;
1662 	struct rvu *rvu = nix_hw->rvu;
1663 	char *cmd_buf, *ctype_string;
1664 	int nixlf, id = 0, ret;
1665 	bool all = false;
1666 
1667 	if ((*ppos != 0) || !count)
1668 		return -EINVAL;
1669 
1670 	switch (ctype) {
1671 	case NIX_AQ_CTYPE_SQ:
1672 		ctype_string = "sq";
1673 		break;
1674 	case NIX_AQ_CTYPE_RQ:
1675 		ctype_string = "rq";
1676 		break;
1677 	case NIX_AQ_CTYPE_CQ:
1678 		ctype_string = "cq";
1679 		break;
1680 	default:
1681 		return -EINVAL;
1682 	}
1683 
1684 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1685 
1686 	if (!cmd_buf)
1687 		return count;
1688 
1689 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1690 				   &nixlf, &id, &all);
1691 	if (ret < 0) {
1692 		dev_info(rvu->dev,
1693 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1694 			 ctype_string, ctype_string);
1695 		goto done;
1696 	} else {
1697 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1698 					  ctype_string, m);
1699 	}
1700 done:
1701 	kfree(cmd_buf);
1702 	return ret ? ret : count;
1703 }
1704 
/* debugfs writer for nix_sq_ctx: forwards to the common NIX queue context
 * write handler with the SQ context type.
 */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}

/* debugfs reader for nix_sq_ctx: dumps the SQ context(s) selected by the
 * most recent write to this file.
 */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1719 
/* debugfs writer for nix_rq_ctx: forwards to the common NIX queue context
 * write handler with the RQ context type.
 */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}

/* debugfs reader for nix_rq_ctx: dumps the RQ context(s) selected by the
 * most recent write to this file.
 */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1734 
/* debugfs writer for nix_cq_ctx: forwards to the common NIX queue context
 * write handler with the CQ context type.
 */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}

/* debugfs reader for nix_cq_ctx: dumps the CQ context(s) selected by the
 * most recent write to this file.
 */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1749 
1750 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1751 				 unsigned long *bmap, char *qtype)
1752 {
1753 	char *buf;
1754 
1755 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1756 	if (!buf)
1757 		return;
1758 
1759 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1760 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1761 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1762 		   qtype, buf);
1763 	kfree(buf);
1764 }
1765 
1766 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1767 {
1768 	if (!pfvf->cq_ctx)
1769 		seq_puts(filp, "cq context is not initialized\n");
1770 	else
1771 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1772 				     "cq");
1773 
1774 	if (!pfvf->rq_ctx)
1775 		seq_puts(filp, "rq context is not initialized\n");
1776 	else
1777 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1778 				     "rq");
1779 
1780 	if (!pfvf->sq_ctx)
1781 		seq_puts(filp, "sq context is not initialized\n");
1782 	else
1783 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1784 				     "sq");
1785 }
1786 
/* debugfs writer for nix_qsize: selects the NIX LF whose queue sizes the
 * next read should report.
 */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs reader for nix_qsize: reports CQ/RQ/SQ sizes and enable bitmaps */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1801 
1802 static void print_band_prof_ctx(struct seq_file *m,
1803 				struct nix_bandprof_s *prof)
1804 {
1805 	char *str;
1806 
1807 	switch (prof->pc_mode) {
1808 	case NIX_RX_PC_MODE_VLAN:
1809 		str = "VLAN";
1810 		break;
1811 	case NIX_RX_PC_MODE_DSCP:
1812 		str = "DSCP";
1813 		break;
1814 	case NIX_RX_PC_MODE_GEN:
1815 		str = "Generic";
1816 		break;
1817 	case NIX_RX_PC_MODE_RSVD:
1818 		str = "Reserved";
1819 		break;
1820 	}
1821 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1822 	str = (prof->icolor == 3) ? "Color blind" :
1823 		(prof->icolor == 0) ? "Green" :
1824 		(prof->icolor == 1) ? "Yellow" : "Red";
1825 	seq_printf(m, "W0: icolor\t\t%s\n", str);
1826 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1827 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1828 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1829 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1830 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1831 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1832 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1833 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1834 
1835 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1836 	str = (prof->lmode == 0) ? "byte" : "packet";
1837 	seq_printf(m, "W1: lmode\t\t%s\n", str);
1838 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1839 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1840 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1841 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1842 	str = (prof->gc_action == 0) ? "PASS" :
1843 		(prof->gc_action == 1) ? "DROP" : "RED";
1844 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
1845 	str = (prof->yc_action == 0) ? "PASS" :
1846 		(prof->yc_action == 1) ? "DROP" : "RED";
1847 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
1848 	str = (prof->rc_action == 0) ? "PASS" :
1849 		(prof->rc_action == 1) ? "DROP" : "RED";
1850 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
1851 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1852 	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1853 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1854 
1855 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1856 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1857 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1858 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1859 		   (u64)prof->green_pkt_pass);
1860 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1861 		   (u64)prof->yellow_pkt_pass);
1862 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1863 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
1864 		   (u64)prof->green_octs_pass);
1865 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1866 		   (u64)prof->yellow_octs_pass);
1867 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1868 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1869 		   (u64)prof->green_pkt_drop);
1870 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1871 		   (u64)prof->yellow_pkt_drop);
1872 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1873 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
1874 		   (u64)prof->green_octs_drop);
1875 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1876 		   (u64)prof->yellow_octs_drop);
1877 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1878 	seq_puts(m, "==============================\n");
1879 }
1880 
/* Dump the context of every allocated NIX ingress bandwidth profile,
 * layer by layer (Leaf/Mid/Top), reading each profile back through the
 * NIX admin queue.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Skip profiles not allocated to any PF/VF */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ index: profile number in bits 0-13, layer
			 * encoded in bits 14-15.
			 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* Zero FUNC bits => owned by a PF, else by a VF */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1937 
1938 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1939 {
1940 	struct nix_hw *nix_hw = m->private;
1941 	struct nix_ipolicer *ipolicer;
1942 	int layer;
1943 	char *str;
1944 
1945 	/* Ingress policers do not exist on all platforms */
1946 	if (!nix_hw->ipolicer)
1947 		return 0;
1948 
1949 	seq_puts(m, "\nBandwidth profile resource free count\n");
1950 	seq_puts(m, "=====================================\n");
1951 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1952 		if (layer == BAND_PROF_INVAL_LAYER)
1953 			continue;
1954 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1955 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1956 
1957 		ipolicer = &nix_hw->ipolicer[layer];
1958 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1959 			   ipolicer->band_prof.max,
1960 			   rvu_rsrc_free_count(&ipolicer->band_prof));
1961 	}
1962 	seq_puts(m, "=====================================\n");
1963 
1964 	return 0;
1965 }
1966 
1967 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1968 
1969 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1970 {
1971 	struct nix_hw *nix_hw;
1972 
1973 	if (!is_block_implemented(rvu->hw, blkaddr))
1974 		return;
1975 
1976 	if (blkaddr == BLKADDR_NIX0) {
1977 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1978 		nix_hw = &rvu->hw->nix[0];
1979 	} else {
1980 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1981 						      rvu->rvu_dbg.root);
1982 		nix_hw = &rvu->hw->nix[1];
1983 	}
1984 
1985 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1986 			    &rvu_dbg_nix_sq_ctx_fops);
1987 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1988 			    &rvu_dbg_nix_rq_ctx_fops);
1989 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1990 			    &rvu_dbg_nix_cq_ctx_fops);
1991 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1992 			    &rvu_dbg_nix_ndc_tx_cache_fops);
1993 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1994 			    &rvu_dbg_nix_ndc_rx_cache_fops);
1995 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1996 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1997 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1998 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1999 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2000 			    &rvu_dbg_nix_qsize_fops);
2001 	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2002 			    &rvu_dbg_nix_band_prof_ctx_fops);
2003 	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2004 			    &rvu_dbg_nix_band_prof_rsrc_fops);
2005 }
2006 
/* Create the NPA debugfs directory and its qsize/context/NDC files */
static void rvu_dbg_npa_init(struct rvu *rvu)
{
	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);

	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_qsize_fops);
	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_aura_ctx_fops);
	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_pool_ctx_fops);
	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_cache_fops);
	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_hits_miss_fops);
}
2022 
/* Fetch and print one cumulative NIX RX stat for the PF/VFs behind a
 * CGX LMAC. Relies on 'rvu', 'cgxd', 'lmac_id', 's' and 'err' being in
 * scope at the expansion site; 'err' is set to the fetch status and the
 * statement expression evaluates to the counter value.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})

/* Same as PRINT_CGX_CUML_NIXRX_STATUS but for the NIX TX counters */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2042 
/* Dump link status, cumulative NIX-level RX/TX stats, and MAC-level
 * (CGX or RPM) RX/TX stats for one LMAC. 's->private' carries the cgx
 * device handle; the RVU AF device is looked up via PCI since debugfs
 * keeps no direct back-pointer to it.
 * Note: 'err' is written by the PRINT_CGX_CUML_NIX*_STATUS macros.
 */
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
	struct cgx_link_user_info linfo;
	struct mac_ops *mac_ops;
	void *cgxd = s->private;
	u64 ucast, mcast, bcast;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	struct rvu *rvu;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	mac_ops = get_mac_ops(cgxd);
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;

	/* Link status */
	seq_puts(s, "\n=======Link Status======\n\n");
	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
	if (err)
		seq_puts(s, "Failed to read link status\n");
	seq_printf(s, "\nLink is %s %d Mbps\n\n",
		   linfo.link_up ? "UP" : "DOWN", linfo.speed);

	/* Rx stats */
	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
		   mac_ops->name);
	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
	if (err)
		return err;

	/* Tx stats */
	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
		   mac_ops->name);
	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
	if (err)
		return err;

	/* Rx stats */
	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
		if (err)
			return err;
		/* Stat names differ between CGX (otx2) and RPM (cn10k) */
		if (is_rvu_otx2(rvu))
			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
				   rx_stat);
		else
			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
				   rx_stat);
		stat++;
	}

	/* Tx stats */
	stat = 0;
	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
		if (err)
			return err;

		if (is_rvu_otx2(rvu))
			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
				   tx_stat);
		else
			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
				   tx_stat);
		stat++;
	}

	return err;
}
2148 
2149 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2150 {
2151 	struct dentry *current_dir;
2152 	char *buf;
2153 
2154 	current_dir = filp->file->f_path.dentry->d_parent;
2155 	buf = strrchr(current_dir->d_name.name, 'c');
2156 	if (!buf)
2157 		return -EINVAL;
2158 
2159 	return kstrtoint(buf + 1, 10, lmac_id);
2160 }
2161 
/* debugfs 'stats' read handler: resolve the LMAC id from the directory
 * name, then dump that LMAC's statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2174 
2175 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2176 {
2177 	struct pci_dev *pdev = NULL;
2178 	void *cgxd = s->private;
2179 	char *bcast, *mcast;
2180 	u16 index, domain;
2181 	u8 dmac[ETH_ALEN];
2182 	struct rvu *rvu;
2183 	u64 cfg, mac;
2184 	int pf;
2185 
2186 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2187 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2188 	if (!rvu)
2189 		return -ENODEV;
2190 
2191 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2192 	domain = 2;
2193 
2194 	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2195 	if (!pdev)
2196 		return 0;
2197 
2198 	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2199 	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2200 	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2201 
2202 	seq_puts(s,
2203 		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2204 	seq_printf(s, "%s  PF%d  %9s  %9s",
2205 		   dev_name(&pdev->dev), pf, bcast, mcast);
2206 	if (cfg & CGX_DMAC_CAM_ACCEPT)
2207 		seq_printf(s, "%12s\n\n", "UNICAST");
2208 	else
2209 		seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2210 
2211 	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2212 
2213 	for (index = 0 ; index < 32 ; index++) {
2214 		cfg = cgx_read_dmac_entry(cgxd, index);
2215 		/* Display enabled dmac entries associated with current lmac */
2216 		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2217 		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2218 			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2219 			u64_to_ether_addr(mac, dmac);
2220 			seq_printf(s, "%7d     %pM\n", index, dmac);
2221 		}
2222 	}
2223 
2224 	return 0;
2225 }
2226 
/* debugfs 'mac_filter' read handler: resolve the LMAC id from the
 * directory name, then dump its DMAC filter table.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int err, lmac_id;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2239 
2240 static void rvu_dbg_cgx_init(struct rvu *rvu)
2241 {
2242 	struct mac_ops *mac_ops;
2243 	unsigned long lmac_bmap;
2244 	int i, lmac_id;
2245 	char dname[20];
2246 	void *cgx;
2247 
2248 	if (!cgx_get_cgxcnt_max())
2249 		return;
2250 
2251 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2252 	if (!mac_ops)
2253 		return;
2254 
2255 	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2256 						   rvu->rvu_dbg.root);
2257 
2258 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2259 		cgx = rvu_cgx_pdata(i, rvu);
2260 		if (!cgx)
2261 			continue;
2262 		lmac_bmap = cgx_get_lmac_bmap(cgx);
2263 		/* cgx debugfs dir */
2264 		sprintf(dname, "%s%d", mac_ops->name, i);
2265 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2266 						      rvu->rvu_dbg.cgx_root);
2267 
2268 		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2269 			/* lmac debugfs dir */
2270 			sprintf(dname, "lmac%d", lmac_id);
2271 			rvu->rvu_dbg.lmac =
2272 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2273 
2274 			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2275 					    cgx, &rvu_dbg_cgx_stat_fops);
2276 			debugfs_create_file("mac_filter", 0600,
2277 					    rvu->rvu_dbg.lmac, cgx,
2278 					    &rvu_dbg_cgx_dmac_flt_fops);
2279 		}
2280 	}
2281 }
2282 
2283 /* NPC debugfs APIs */
2284 static void rvu_print_npc_mcam_info(struct seq_file *s,
2285 				    u16 pcifunc, int blkaddr)
2286 {
2287 	struct rvu *rvu = s->private;
2288 	int entry_acnt, entry_ecnt;
2289 	int cntr_acnt, cntr_ecnt;
2290 
2291 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2292 					  &entry_acnt, &entry_ecnt);
2293 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2294 					    &cntr_acnt, &cntr_ecnt);
2295 	if (!entry_acnt && !cntr_acnt)
2296 		return;
2297 
2298 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2299 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2300 			   rvu_get_pf(pcifunc));
2301 	else
2302 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2303 			   rvu_get_pf(pcifunc),
2304 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2305 
2306 	if (entry_acnt) {
2307 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2308 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2309 	}
2310 	if (cntr_acnt) {
2311 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2312 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2313 	}
2314 }
2315 
2316 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2317 {
2318 	struct rvu *rvu = filp->private;
2319 	int pf, vf, numvfs, blkaddr;
2320 	struct npc_mcam *mcam;
2321 	u16 pcifunc, counters;
2322 	u64 cfg;
2323 
2324 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2325 	if (blkaddr < 0)
2326 		return -ENODEV;
2327 
2328 	mcam = &rvu->hw->mcam;
2329 	counters = rvu->hw->npc_counters;
2330 
2331 	seq_puts(filp, "\nNPC MCAM info:\n");
2332 	/* MCAM keywidth on receive and transmit sides */
2333 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2334 	cfg = (cfg >> 32) & 0x07;
2335 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2336 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2337 		   "224bits" : "448bits"));
2338 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2339 	cfg = (cfg >> 32) & 0x07;
2340 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2341 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2342 		   "224bits" : "448bits"));
2343 
2344 	mutex_lock(&mcam->lock);
2345 	/* MCAM entries */
2346 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2347 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2348 		   mcam->total_entries - mcam->bmap_entries);
2349 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2350 
2351 	/* MCAM counters */
2352 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2353 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2354 		   counters - mcam->counters.max);
2355 	seq_printf(filp, "\t\t Available \t: %d\n",
2356 		   rvu_rsrc_free_count(&mcam->counters));
2357 
2358 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
2359 		mutex_unlock(&mcam->lock);
2360 		return 0;
2361 	}
2362 
2363 	seq_puts(filp, "\n\t\t Current allocation\n");
2364 	seq_puts(filp, "\t\t====================\n");
2365 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2366 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2367 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2368 
2369 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2370 		numvfs = (cfg >> 12) & 0xFF;
2371 		for (vf = 0; vf < numvfs; vf++) {
2372 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2373 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2374 		}
2375 	}
2376 
2377 	mutex_unlock(&mcam->lock);
2378 	return 0;
2379 }
2380 
2381 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2382 
2383 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2384 					     void *unused)
2385 {
2386 	struct rvu *rvu = filp->private;
2387 	struct npc_mcam *mcam;
2388 	int blkaddr;
2389 
2390 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2391 	if (blkaddr < 0)
2392 		return -ENODEV;
2393 
2394 	mcam = &rvu->hw->mcam;
2395 
2396 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2397 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2398 		   rvu_read64(rvu, blkaddr,
2399 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2400 
2401 	return 0;
2402 }
2403 
2404 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2405 
/* Pretty-print every match field (value and mask) set in an MCAM rule's
 * 64-bit feature bitmap. Network-order fields are converted with ntohs
 * before printing.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		/* TCP/UDP/SCTP share the same sport/dport storage */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* Field name only; no value printer for this bit */
			seq_puts(s, "\n");
			break;
		}
	}
}
2469 
/* Print the forwarding action of an MCAM rule, decoding TX and RX
 * action opcodes separately depending on the rule's interface.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			/* Unknown opcode: print nothing */
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			/* Unknown opcode: print nothing */
			break;
		}
	}
}
2517 
/* Map an NPC interface id to a human-readable name */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2535 
/* debugfs 'mcam_rules' handler: walk the software list of installed
 * MCAM rules under mcam->lock and print each rule's owner, interface,
 * match fields, action, enable state and (if attached) hit counter.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits => rule was installed by a VF */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forwarding target and channel match */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		/* Hit stats only exist if a counter is attached to the rule */
		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2603 
/* debugfs 'exact_entries' handler: dump the exact-match MEM table (one
 * column per hash way, rows by index) followed by the overflow CAM
 * table, both under table->lock.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	/* Per-way cursor into each way's sorted entry list */
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	for (i = 0; i < table->mem_table.ways; i++) {
		/* Start each cursor at the head of that way's list */
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		bitmap = 0;
		/* Collect which ways have an entry at this row index */
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* Advance this way's cursor to its next entry */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
2690 
2691 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
2692 {
2693 	struct npc_exact_table *table;
2694 	struct rvu *rvu = s->private;
2695 	int i;
2696 
2697 	table = rvu->hw->table;
2698 
2699 	seq_puts(s, "\n\tExact Table Info\n");
2700 	seq_printf(s, "Exact Match Feature : %s\n",
2701 		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
2702 	if (!rvu->hw->cap.npc_exact_match_enabled)
2703 		return 0;
2704 
2705 	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
2706 	for (i = 0; i < table->num_drop_rules; i++)
2707 		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
2708 
2709 	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
2710 	for (i = 0; i < table->num_drop_rules; i++)
2711 		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
2712 
2713 	seq_puts(s, "\n\tMEM Table Info\n");
2714 	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
2715 	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
2716 	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
2717 	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
2718 	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
2719 
2720 	seq_puts(s, "\n\tCAM Table Info\n");
2721 	seq_printf(s, "Depth : %d\n", table->cam_table.depth);
2722 
2723 	return 0;
2724 }
2725 
2726 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
2727 
2728 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
2729 {
2730 	struct npc_exact_table *table;
2731 	struct rvu *rvu = s->private;
2732 	struct npc_key_field *field;
2733 	u16 chan, pcifunc;
2734 	int blkaddr, i;
2735 	u64 cfg, cam1;
2736 	char *str;
2737 
2738 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2739 	table = rvu->hw->table;
2740 
2741 	field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
2742 
2743 	seq_puts(s, "\n\t Exact Hit on drop status\n");
2744 	seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
2745 
2746 	for (i = 0; i < table->num_drop_rules; i++) {
2747 		pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
2748 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
2749 
2750 		/* channel will be always in keyword 0 */
2751 		cam1 = rvu_read64(rvu, blkaddr,
2752 				  NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
2753 		chan = field->kw_mask[0] & cam1;
2754 
2755 		str = (cfg & 1) ? "enabled" : "disabled";
2756 
2757 		seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
2758 			   rvu_read64(rvu, blkaddr,
2759 				      NPC_AF_MATCH_STATX(table->counter_idx[i])),
2760 			   chan, str);
2761 	}
2762 
2763 	return 0;
2764 }
2765 
2766 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
2767 
2768 static void rvu_dbg_npc_init(struct rvu *rvu)
2769 {
2770 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2771 
2772 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2773 			    &rvu_dbg_npc_mcam_info_fops);
2774 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2775 			    &rvu_dbg_npc_mcam_rules_fops);
2776 
2777 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2778 			    &rvu_dbg_npc_rx_miss_act_fops);
2779 
2780 	if (!rvu->hw->cap.npc_exact_match_enabled)
2781 		return;
2782 
2783 	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
2784 			    &rvu_dbg_npc_exact_entries_fops);
2785 
2786 	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
2787 			    &rvu_dbg_npc_exact_info_fops);
2788 
2789 	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
2790 			    &rvu_dbg_npc_exact_drop_cnt_fops);
2791 
2792 }
2793 
2794 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2795 {
2796 	struct cpt_ctx *ctx = filp->private;
2797 	u64 busy_sts = 0, free_sts = 0;
2798 	u32 e_min = 0, e_max = 0, e, i;
2799 	u16 max_ses, max_ies, max_aes;
2800 	struct rvu *rvu = ctx->rvu;
2801 	int blkaddr = ctx->blkaddr;
2802 	u64 reg;
2803 
2804 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2805 	max_ses = reg & 0xffff;
2806 	max_ies = (reg >> 16) & 0xffff;
2807 	max_aes = (reg >> 32) & 0xffff;
2808 
2809 	switch (eng_type) {
2810 	case CPT_AE_TYPE:
2811 		e_min = max_ses + max_ies;
2812 		e_max = max_ses + max_ies + max_aes;
2813 		break;
2814 	case CPT_SE_TYPE:
2815 		e_min = 0;
2816 		e_max = max_ses;
2817 		break;
2818 	case CPT_IE_TYPE:
2819 		e_min = max_ses;
2820 		e_max = max_ses + max_ies;
2821 		break;
2822 	default:
2823 		return -EINVAL;
2824 	}
2825 
2826 	for (e = e_min, i = 0; e < e_max; e++, i++) {
2827 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2828 		if (reg & 0x1)
2829 			busy_sts |= 1ULL << i;
2830 
2831 		if (reg & 0x2)
2832 			free_sts |= 1ULL << i;
2833 	}
2834 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2835 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
2836 
2837 	return 0;
2838 }
2839 
/* Seq-file show: busy/free status of the asymmetric (AE) engines. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2846 
/* Seq-file show: busy/free status of the symmetric (SE) engines. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2853 
/* Seq-file show: busy/free status of the IPsec (IE) engines. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2860 
2861 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2862 {
2863 	struct cpt_ctx *ctx = filp->private;
2864 	u16 max_ses, max_ies, max_aes;
2865 	struct rvu *rvu = ctx->rvu;
2866 	int blkaddr = ctx->blkaddr;
2867 	u32 e_max, e;
2868 	u64 reg;
2869 
2870 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2871 	max_ses = reg & 0xffff;
2872 	max_ies = (reg >> 16) & 0xffff;
2873 	max_aes = (reg >> 32) & 0xffff;
2874 
2875 	e_max = max_ses + max_ies + max_aes;
2876 
2877 	seq_puts(filp, "===========================================\n");
2878 	for (e = 0; e < e_max; e++) {
2879 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2880 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2881 			   reg & 0xff);
2882 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2883 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2884 			   reg);
2885 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2886 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2887 			   reg);
2888 		seq_puts(filp, "===========================================\n");
2889 	}
2890 	return 0;
2891 }
2892 
2893 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2894 
2895 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2896 {
2897 	struct cpt_ctx *ctx = filp->private;
2898 	int blkaddr = ctx->blkaddr;
2899 	struct rvu *rvu = ctx->rvu;
2900 	struct rvu_block *block;
2901 	struct rvu_hwinfo *hw;
2902 	u64 reg;
2903 	u32 lf;
2904 
2905 	hw = rvu->hw;
2906 	block = &hw->block[blkaddr];
2907 	if (!block->lf.bmap)
2908 		return -ENODEV;
2909 
2910 	seq_puts(filp, "===========================================\n");
2911 	for (lf = 0; lf < block->lf.max; lf++) {
2912 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2913 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
2914 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2915 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
2916 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2917 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
2918 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2919 				(lf << block->lfshift));
2920 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
2921 		seq_puts(filp, "===========================================\n");
2922 	}
2923 	return 0;
2924 }
2925 
2926 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2927 
2928 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2929 {
2930 	struct cpt_ctx *ctx = filp->private;
2931 	struct rvu *rvu = ctx->rvu;
2932 	int blkaddr = ctx->blkaddr;
2933 	u64 reg0, reg1;
2934 
2935 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2936 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2937 	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
2938 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2939 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2940 	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
2941 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2942 	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
2943 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2944 	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
2945 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2946 	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
2947 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2948 	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
2949 
2950 	return 0;
2951 }
2952 
2953 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2954 
2955 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2956 {
2957 	struct cpt_ctx *ctx = filp->private;
2958 	struct rvu *rvu = ctx->rvu;
2959 	int blkaddr = ctx->blkaddr;
2960 	u64 reg;
2961 
2962 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2963 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2964 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2965 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2966 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2967 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2968 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2969 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2970 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2971 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2972 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2973 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2974 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2975 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2976 
2977 	return 0;
2978 }
2979 
2980 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2981 
2982 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2983 {
2984 	struct cpt_ctx *ctx;
2985 
2986 	if (!is_block_implemented(rvu->hw, blkaddr))
2987 		return;
2988 
2989 	if (blkaddr == BLKADDR_CPT0) {
2990 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2991 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
2992 		ctx->blkaddr = BLKADDR_CPT0;
2993 		ctx->rvu = rvu;
2994 	} else {
2995 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2996 						      rvu->rvu_dbg.root);
2997 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
2998 		ctx->blkaddr = BLKADDR_CPT1;
2999 		ctx->rvu = rvu;
3000 	}
3001 
3002 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3003 			    &rvu_dbg_cpt_pc_fops);
3004 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3005 			    &rvu_dbg_cpt_ae_sts_fops);
3006 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3007 			    &rvu_dbg_cpt_se_sts_fops);
3008 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3009 			    &rvu_dbg_cpt_ie_sts_fops);
3010 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3011 			    &rvu_dbg_cpt_engines_info_fops);
3012 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3013 			    &rvu_dbg_cpt_lfs_info_fops);
3014 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3015 			    &rvu_dbg_cpt_err_info_fops);
3016 }
3017 
3018 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
3019 {
3020 	if (!is_rvu_otx2(rvu))
3021 		return "cn10k";
3022 	else
3023 		return "octeontx2";
3024 }
3025 
3026 void rvu_dbg_init(struct rvu *rvu)
3027 {
3028 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3029 
3030 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3031 			    &rvu_dbg_rsrc_status_fops);
3032 
3033 	if (!is_rvu_otx2(rvu))
3034 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3035 				    rvu, &rvu_dbg_lmtst_map_table_fops);
3036 
3037 	if (!cgx_get_cgxcnt_max())
3038 		goto create;
3039 
3040 	if (is_rvu_otx2(rvu))
3041 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3042 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3043 	else
3044 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3045 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3046 
3047 create:
3048 	rvu_dbg_npa_init(rvu);
3049 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3050 
3051 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3052 	rvu_dbg_cgx_init(rvu);
3053 	rvu_dbg_npc_init(rvu);
3054 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3055 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3056 }
3057 
/* Tear down the whole debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3062 
3063 #endif /* CONFIG_DEBUG_FS */
3064