1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 
22 #define DEBUGFS_DIR_NAME "octeontx2"
23 
/* Generic stat indices used to tag entries in the CGX RX/TX string
 * tables below; the meaning of a given CGX_STATx differs between the
 * RX and TX tables.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
45 
46 /* NIX TX stats */
/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,	/* count of TX stats (values are 0..4) */
};
55 
56 /* NIX RX stats */
/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,	/* count of RX stats (values are 0..0xb) */
};
72 
/* Human-readable descriptions of the CGX RX statistics, indexed by
 * CGX_STATx. Printed as-is to the debugfs output.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
88 
89 static char *cgx_tx_stats_fields[] = {
90 	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
91 	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
92 	[CGX_STAT2]	= "Multiple collisions before successful transmission",
93 	[CGX_STAT3]	= "Single collisions before successful transmission",
94 	[CGX_STAT4]	= "Total octets sent on the interface",
95 	[CGX_STAT5]	= "Total frames sent on the interface",
96 	[CGX_STAT6]	= "Packets sent with an octet count < 64",
97 	[CGX_STAT7]	= "Packets sent with an octet count == 64",
98 	[CGX_STAT8]	= "Packets sent with an octet count of 65–127",
99 	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
100 	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
101 	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
102 	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
103 	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
104 	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
105 	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
106 	[CGX_STAT16]	= "Transmit underflow and were truncated",
107 	[CGX_STAT17]	= "Control/PAUSE packets sent",
108 };
109 
110 static char *rpm_rx_stats_fields[] = {
111 	"Octets of received packets",
112 	"Octets of received packets with out error",
113 	"Received packets with alignment errors",
114 	"Control/PAUSE packets received",
115 	"Packets received with Frame too long Errors",
116 	"Packets received with a1nrange length Errors",
117 	"Received packets",
118 	"Packets received with FrameCheckSequenceErrors",
119 	"Packets received with VLAN header",
120 	"Error packets",
121 	"Packets received with unicast DMAC",
122 	"Packets received with multicast DMAC",
123 	"Packets received with broadcast DMAC",
124 	"Dropped packets",
125 	"Total frames received on interface",
126 	"Packets received with an octet count < 64",
127 	"Packets received with an octet count == 64",
128 	"Packets received with an octet count of 65–127",
129 	"Packets received with an octet count of 128-255",
130 	"Packets received with an octet count of 256-511",
131 	"Packets received with an octet count of 512-1023",
132 	"Packets received with an octet count of 1024-1518",
133 	"Packets received with an octet count of > 1518",
134 	"Oversized Packets",
135 	"Jabber Packets",
136 	"Fragmented Packets",
137 	"CBFC(class based flow control) pause frames received for class 0",
138 	"CBFC pause frames received for class 1",
139 	"CBFC pause frames received for class 2",
140 	"CBFC pause frames received for class 3",
141 	"CBFC pause frames received for class 4",
142 	"CBFC pause frames received for class 5",
143 	"CBFC pause frames received for class 6",
144 	"CBFC pause frames received for class 7",
145 	"CBFC pause frames received for class 8",
146 	"CBFC pause frames received for class 9",
147 	"CBFC pause frames received for class 10",
148 	"CBFC pause frames received for class 11",
149 	"CBFC pause frames received for class 12",
150 	"CBFC pause frames received for class 13",
151 	"CBFC pause frames received for class 14",
152 	"CBFC pause frames received for class 15",
153 	"MAC control packets received",
154 };
155 
/* Human-readable descriptions of the RPM TX statistics, in hardware
 * stat-register order. Printed as-is to the debugfs output.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	/* Fixed mis-encoded en-dash (“65–127”) to ASCII hyphen, matching
	 * the other bucket entries in this table.
	 */
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface",	/* added trailing comma for consistency */
};
192 
/* Engine type codes for the CPT block (values are hardware-defined,
 * starting at 1).
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
198 
/* Number of NDC banks: low byte of the block's NDC_AF_CONST register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Stubs so the FOPS macros below can be instantiated with NULL for an
 * unused read/write op (token-pasting yields rvu_dbg_NULL == NULL).
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Generates a seq_file-backed file_operations named rvu_dbg_<name>_fops,
 * using rvu_dbg_<read_op> as the show callback and rvu_dbg_<write_op>
 * (or NULL) as the write handler.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Generates a raw (non-seq_file) file_operations named rvu_dbg_<name>_fops */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
226 
227 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
228 
229 /* Dumps current provisioning status of all RVU block LFs */
230 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
231 					  char __user *buffer,
232 					  size_t count, loff_t *ppos)
233 {
234 	int index, off = 0, flag = 0, go_back = 0, len = 0;
235 	struct rvu *rvu = filp->private_data;
236 	int lf, pf, vf, pcifunc;
237 	struct rvu_block block;
238 	int bytes_not_copied;
239 	int lf_str_size = 12;
240 	int buf_size = 2048;
241 	char *lfs;
242 	char *buf;
243 
244 	/* don't allow partial reads */
245 	if (*ppos != 0)
246 		return 0;
247 
248 	buf = kzalloc(buf_size, GFP_KERNEL);
249 	if (!buf)
250 		return -ENOSPC;
251 
252 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
253 	if (!lfs) {
254 		kfree(buf);
255 		return -ENOMEM;
256 	}
257 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
258 			  "pcifunc");
259 	for (index = 0; index < BLK_COUNT; index++)
260 		if (strlen(rvu->hw->block[index].name)) {
261 			off += scnprintf(&buf[off], buf_size - 1 - off,
262 					 "%-*s", lf_str_size,
263 					 rvu->hw->block[index].name);
264 		}
265 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
266 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
267 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
268 			pcifunc = pf << 10 | vf;
269 			if (!pcifunc)
270 				continue;
271 
272 			if (vf) {
273 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
274 				go_back = scnprintf(&buf[off],
275 						    buf_size - 1 - off,
276 						    "%-*s", lf_str_size, lfs);
277 			} else {
278 				sprintf(lfs, "PF%d", pf);
279 				go_back = scnprintf(&buf[off],
280 						    buf_size - 1 - off,
281 						    "%-*s", lf_str_size, lfs);
282 			}
283 
284 			off += go_back;
285 			for (index = 0; index < BLKTYPE_MAX; index++) {
286 				block = rvu->hw->block[index];
287 				if (!strlen(block.name))
288 					continue;
289 				len = 0;
290 				lfs[len] = '\0';
291 				for (lf = 0; lf < block.lf.max; lf++) {
292 					if (block.fn_map[lf] != pcifunc)
293 						continue;
294 					flag = 1;
295 					len += sprintf(&lfs[len], "%d,", lf);
296 				}
297 
298 				if (flag)
299 					len--;
300 				lfs[len] = '\0';
301 				off += scnprintf(&buf[off], buf_size - 1 - off,
302 						 "%-*s", lf_str_size, lfs);
303 				if (!strlen(lfs))
304 					go_back += lf_str_size;
305 			}
306 			if (!flag)
307 				off -= go_back;
308 			else
309 				flag = 0;
310 			off--;
311 			off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
312 		}
313 	}
314 
315 	bytes_not_copied = copy_to_user(buffer, buf, off);
316 	kfree(lfs);
317 	kfree(buf);
318 
319 	if (bytes_not_copied)
320 		return -EFAULT;
321 
322 	*ppos = off;
323 	return off;
324 }
325 
326 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
327 
328 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
329 {
330 	struct rvu *rvu = filp->private;
331 	struct pci_dev *pdev = NULL;
332 	struct mac_ops *mac_ops;
333 	char cgx[10], lmac[10];
334 	struct rvu_pfvf *pfvf;
335 	int pf, domain, blkid;
336 	u8 cgx_id, lmac_id;
337 	u16 pcifunc;
338 
339 	domain = 2;
340 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
341 	/* There can be no CGX devices at all */
342 	if (!mac_ops)
343 		return 0;
344 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
345 		   mac_ops->name);
346 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
347 		if (!is_pf_cgxmapped(rvu, pf))
348 			continue;
349 
350 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
351 		if (!pdev)
352 			continue;
353 
354 		cgx[0] = 0;
355 		lmac[0] = 0;
356 		pcifunc = pf << 10;
357 		pfvf = rvu_get_pfvf(rvu, pcifunc);
358 
359 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
360 			blkid = 0;
361 		else
362 			blkid = 1;
363 
364 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
365 				    &lmac_id);
366 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
367 		sprintf(lmac, "LMAC%d", lmac_id);
368 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
369 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
370 	}
371 	return 0;
372 }
373 
374 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
375 
376 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
377 				u16 *pcifunc)
378 {
379 	struct rvu_block *block;
380 	struct rvu_hwinfo *hw;
381 
382 	hw = rvu->hw;
383 	block = &hw->block[blkaddr];
384 
385 	if (lf < 0 || lf >= block->lf.max) {
386 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
387 			 block->lf.max - 1);
388 		return false;
389 	}
390 
391 	*pcifunc = block->fn_map[lf];
392 	if (!*pcifunc) {
393 		dev_warn(rvu->dev,
394 			 "This LF is not attached to any RVU PFFUNC\n");
395 		return false;
396 	}
397 	return true;
398 }
399 
400 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
401 {
402 	char *buf;
403 
404 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
405 	if (!buf)
406 		return;
407 
408 	if (!pfvf->aura_ctx) {
409 		seq_puts(m, "Aura context is not initialized\n");
410 	} else {
411 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
412 					pfvf->aura_ctx->qsize);
413 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
414 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
415 	}
416 
417 	if (!pfvf->pool_ctx) {
418 		seq_puts(m, "Pool context is not initialized\n");
419 	} else {
420 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
421 					pfvf->pool_ctx->qsize);
422 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
423 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
424 	}
425 	kfree(buf);
426 }
427 
428 /* The 'qsize' entry dumps current Aura/Pool context Qsize
429  * and each context's current enable/disable status in a bitmap.
430  */
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
				 int blktype)
{
	/* Per-blocktype printer, chosen in the switch below */
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = filp->private;
	/* Pick the LF last selected via the matching qsize write handler */
	switch (blktype) {
	case BLKTYPE_NPA:
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	/* For NIX, infer NIX0 vs NIX1 from the debugfs parent directory
	 * name ("nix1" => NIX1); NPA has a single block address.
	 */
	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
				   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);

	return 0;
}
475 
476 static ssize_t rvu_dbg_qsize_write(struct file *filp,
477 				   const char __user *buffer, size_t count,
478 				   loff_t *ppos, int blktype)
479 {
480 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
481 	struct seq_file *seqfile = filp->private_data;
482 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
483 	struct rvu *rvu = seqfile->private;
484 	struct dentry *current_dir;
485 	int blkaddr;
486 	u16 pcifunc;
487 	int ret, lf;
488 
489 	cmd_buf = memdup_user(buffer, count + 1);
490 	if (IS_ERR(cmd_buf))
491 		return -ENOMEM;
492 
493 	cmd_buf[count] = '\0';
494 
495 	cmd_buf_tmp = strchr(cmd_buf, '\n');
496 	if (cmd_buf_tmp) {
497 		*cmd_buf_tmp = '\0';
498 		count = cmd_buf_tmp - cmd_buf + 1;
499 	}
500 
501 	cmd_buf_tmp = cmd_buf;
502 	subtoken = strsep(&cmd_buf, " ");
503 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
504 	if (cmd_buf)
505 		ret = -EINVAL;
506 
507 	if (!strncmp(subtoken, "help", 4) || ret < 0) {
508 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
509 		goto qsize_write_done;
510 	}
511 
512 	if (blktype == BLKTYPE_NPA) {
513 		blkaddr = BLKADDR_NPA;
514 	} else {
515 		current_dir = filp->f_path.dentry->d_parent;
516 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
517 				   BLKADDR_NIX1 : BLKADDR_NIX0);
518 	}
519 
520 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
521 		ret = -EINVAL;
522 		goto qsize_write_done;
523 	}
524 	if (blktype  == BLKTYPE_NPA)
525 		rvu->rvu_dbg.npa_qsize_id = lf;
526 	else
527 		rvu->rvu_dbg.nix_qsize_id = lf;
528 
529 qsize_write_done:
530 	kfree(cmd_buf_tmp);
531 	return ret ? ret : count;
532 }
533 
534 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
535 				       const char __user *buffer,
536 				       size_t count, loff_t *ppos)
537 {
538 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
539 					    BLKTYPE_NPA);
540 }
541 
/* NPA-specific front end for the shared qsize show handler */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
548 
549 /* Dumps given NPA Aura's context */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	/* One field per line, prefixed with the context word (W0..W6)
	 * the field lives in.
	 */
	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be and fc_msh_dst only exist on silicon newer than OcteonTX2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
596 
597 /* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	/* One field per line, prefixed with the context word (W0..W8)
	 * the field lives in.
	 */
	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be and fc_msh_dst only exist on silicon newer than OcteonTX2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
643 
644 /* Reads aura/pool's ctx from admin queue */
/* Reads aura/pool's ctx from admin queue */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Use the LF/id/all selection cached by the matching write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ request for the selected context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every context [0, qsize); otherwise just [id, id+1) */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
724 
/* Validates the aura/pool selection written via debugfs and caches it
 * in rvu->rvu_dbg for the matching display handler to use.
 * Returns 0 on success, -EINVAL on a bad LF, id or context type.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* The id must fall inside the LF's configured aura/pool qsize */
	if (ctype == NPA_AQ_CTYPE_AURA) {
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
	} else if (ctype == NPA_AQ_CTYPE_POOL) {
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* Remember the selection for the show side */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
775 
/* Parses "<npalf> [<id>|all]" from the user buffer into @npalf, @id
 * and @all. @cmd_buf must be at least *count + 1 bytes. Returns 0 on
 * success, -EFAULT on copy failure, -EINVAL on malformed input.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	/* Trim at the first newline, consuming it from the count */
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Second (optional) token: a context id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* Any remaining token is an error */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
813 
814 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
815 				     const char __user *buffer,
816 				     size_t count, loff_t *ppos, int ctype)
817 {
818 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
819 					"aura" : "pool";
820 	struct seq_file *seqfp = filp->private_data;
821 	struct rvu *rvu = seqfp->private;
822 	int npalf, id = 0, ret;
823 	bool all = false;
824 
825 	if ((*ppos != 0) || !count)
826 		return -EINVAL;
827 
828 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
829 	if (!cmd_buf)
830 		return count;
831 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
832 				   &npalf, &id, &all);
833 	if (ret < 0) {
834 		dev_info(rvu->dev,
835 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
836 			 ctype_string, ctype_string);
837 		goto done;
838 	} else {
839 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
840 	}
841 done:
842 	kfree(cmd_buf);
843 	return ret ? ret : count;
844 }
845 
846 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
847 					  const char __user *buffer,
848 					  size_t count, loff_t *ppos)
849 {
850 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
851 				     NPA_AQ_CTYPE_AURA);
852 }
853 
/* Aura-specific front end for the shared NPA context show handler */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
860 
861 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
862 					  const char __user *buffer,
863 					  size_t count, loff_t *ppos)
864 {
865 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
866 				     NPA_AQ_CTYPE_POOL);
867 }
868 
/* Pool-specific front end for the shared NPA context show handler */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
875 
876 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
877 			    int ctype, int transaction)
878 {
879 	u64 req, out_req, lat, cant_alloc;
880 	struct nix_hw *nix_hw;
881 	struct rvu *rvu;
882 	int port;
883 
884 	if (blk_addr == BLKADDR_NDC_NPA0) {
885 		rvu = s->private;
886 	} else {
887 		nix_hw = s->private;
888 		rvu = nix_hw->rvu;
889 	}
890 
891 	for (port = 0; port < NDC_MAX_PORT; port++) {
892 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
893 						(port, ctype, transaction));
894 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
895 						(port, ctype, transaction));
896 		out_req = rvu_read64(rvu, blk_addr,
897 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
898 				     (port, ctype, transaction));
899 		cant_alloc = rvu_read64(rvu, blk_addr,
900 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
901 					(port, transaction));
902 		seq_printf(s, "\nPort:%d\n", port);
903 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
904 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
905 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
906 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
907 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
908 	}
909 }
910 
911 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
912 {
913 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
914 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
915 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
916 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
917 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
918 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
919 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
920 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
921 	return 0;
922 }
923 
/* NPA has a single NDC unit (BLKADDR_NDC_NPA0) */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
930 
931 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
932 {
933 	struct nix_hw *nix_hw;
934 	struct rvu *rvu;
935 	int bank, max_bank;
936 
937 	if (blk_addr == BLKADDR_NDC_NPA0) {
938 		rvu = s->private;
939 	} else {
940 		nix_hw = s->private;
941 		rvu = nix_hw->rvu;
942 	}
943 
944 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
945 	for (bank = 0; bank < max_bank; bank++) {
946 		seq_printf(s, "BANK:%d\n", bank);
947 		seq_printf(s, "\tHits:\t%lld\n",
948 			   (u64)rvu_read64(rvu, blk_addr,
949 			   NDC_AF_BANKX_HIT_PC(bank)));
950 		seq_printf(s, "\tMiss:\t%lld\n",
951 			   (u64)rvu_read64(rvu, blk_addr,
952 			    NDC_AF_BANKX_MISS_PC(bank)));
953 	}
954 	return 0;
955 }
956 
957 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
958 {
959 	struct nix_hw *nix_hw = filp->private;
960 	int blkaddr = 0;
961 	int ndc_idx = 0;
962 
963 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
964 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
965 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
966 
967 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
968 }
969 
970 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
971 
972 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
973 {
974 	struct nix_hw *nix_hw = filp->private;
975 	int blkaddr = 0;
976 	int ndc_idx = 0;
977 
978 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
979 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
980 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
981 
982 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
983 }
984 
985 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
986 
/* NPA has a single NDC unit (BLKADDR_NDC_NPA0) */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
994 
995 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
996 						void *unused)
997 {
998 	struct nix_hw *nix_hw = filp->private;
999 	int ndc_idx = NPA0_U;
1000 	int blkaddr = 0;
1001 
1002 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1003 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1004 
1005 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1006 }
1007 
1008 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1009 
1010 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1011 						void *unused)
1012 {
1013 	struct nix_hw *nix_hw = filp->private;
1014 	int ndc_idx = NPA0_U;
1015 	int blkaddr = 0;
1016 
1017 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1018 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1019 
1020 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1021 }
1022 
1023 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1024 
1025 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1026 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1027 {
1028 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1029 		   sq_ctx->ena, sq_ctx->qint_idx);
1030 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1031 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1032 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1033 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1034 
1035 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1036 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1037 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1038 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1039 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1040 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1041 
1042 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1043 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1044 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1045 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1046 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1047 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1048 
1049 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1050 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1051 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1052 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1053 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1054 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1055 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1056 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1057 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1058 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1059 
1060 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1061 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1062 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1063 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1064 		   sq_ctx->smenq_next_sqb);
1065 
1066 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1067 
1068 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1069 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1070 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1071 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1072 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1073 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1074 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1075 
1076 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1077 		   (u64)sq_ctx->scm_lso_rem);
1078 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1079 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1080 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1081 		   (u64)sq_ctx->dropped_octs);
1082 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1083 		   (u64)sq_ctx->dropped_pkts);
1084 }
1085 
/* Dumps given nix_sq's context into the seq file, one line per field,
 * grouped by the 64-bit context word (W0..W15) the field lives in.
 * On non-OcteonTx2 (CN10K) silicon the SQ context layout differs, so
 * the dump is delegated to print_nix_cn10k_sq_ctx().
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K uses a different context structure for the same words */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8 hold SQB (send queue buffer) pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10..W15 are statistics counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1155 
1156 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1157 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1158 {
1159 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1160 		   rq_ctx->ena, rq_ctx->sso_ena);
1161 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1162 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1163 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1164 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1165 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1166 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1167 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1168 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1169 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1170 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1171 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1172 
1173 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1174 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1175 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1176 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1177 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1178 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1179 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1180 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1181 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1182 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1183 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1184 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1185 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1186 
1187 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1188 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1189 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1190 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1191 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1192 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1193 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1194 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1195 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1196 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1197 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1198 
1199 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1200 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1201 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1202 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1203 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1204 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1205 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1206 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1207 
1208 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1209 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1210 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1211 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1212 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1213 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1214 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1215 
1216 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1217 		   rq_ctx->ltag, rq_ctx->good_utag);
1218 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1219 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1220 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1221 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1222 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1223 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1224 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1225 
1226 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1227 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1228 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1229 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1230 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1231 }
1232 
/* Dumps given nix_rq's context into the seq file, one line per field,
 * grouped by the 64-bit context word (W0..W10) the field lives in.
 * On non-OcteonTx2 (CN10K) silicon the RQ context layout differs, so
 * the dump is delegated to print_nix_cn10k_rq_ctx().
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K uses a different context structure for the same words */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6..W10 are statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1301 
/* Dumps given nix_cq's context into the seq file, one line per field,
 * grouped by the 64-bit context word (W0..W3) the field lives in.
 * NOTE(review): some labels lack a space after "W2:"/"W3:" — kept
 * as-is since the output format may be consumed by existing tooling.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1332 
/* Common debugfs read path for sq_ctx/rq_ctx/cq_ctx files.
 *
 * Dumps the context of the queue(s) previously selected by a write to
 * the same file (write_nix_queue_ctx() stashes the LF, queue id and
 * "all" flag in rvu->rvu_dbg). Contexts are fetched from hardware via
 * the NIX admin queue mailbox handler.
 *
 * Returns 0 on success, -EINVAL on bad ctype/LF, uninitialized queue
 * context, or AQ read failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the LF/qid selection saved by the last write to the file */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Select the per-ctype dump helper and the queue count */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* Either walk every queue of the LF or just the selected one */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1421 
/* Validates and stores the queue selection written to a sq_ctx/rq_ctx/
 * cq_ctx debugfs file. The stored (LF, queue id, all) tuple is consumed
 * by rvu_dbg_nix_queue_ctx_display() on the next read of the same file.
 *
 * Returns 0 on success, -EINVAL on invalid LF, uninitialized queue
 * context, out-of-range queue id, or unknown ctype.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Reject selections for queues whose context was never set up */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	/* Persist the selection for the subsequent read */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1484 
1485 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1486 					   const char __user *buffer,
1487 					   size_t count, loff_t *ppos,
1488 					   int ctype)
1489 {
1490 	struct seq_file *m = filp->private_data;
1491 	struct nix_hw *nix_hw = m->private;
1492 	struct rvu *rvu = nix_hw->rvu;
1493 	char *cmd_buf, *ctype_string;
1494 	int nixlf, id = 0, ret;
1495 	bool all = false;
1496 
1497 	if ((*ppos != 0) || !count)
1498 		return -EINVAL;
1499 
1500 	switch (ctype) {
1501 	case NIX_AQ_CTYPE_SQ:
1502 		ctype_string = "sq";
1503 		break;
1504 	case NIX_AQ_CTYPE_RQ:
1505 		ctype_string = "rq";
1506 		break;
1507 	case NIX_AQ_CTYPE_CQ:
1508 		ctype_string = "cq";
1509 		break;
1510 	default:
1511 		return -EINVAL;
1512 	}
1513 
1514 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1515 
1516 	if (!cmd_buf)
1517 		return count;
1518 
1519 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1520 				   &nixlf, &id, &all);
1521 	if (ret < 0) {
1522 		dev_info(rvu->dev,
1523 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1524 			 ctype_string, ctype_string);
1525 		goto done;
1526 	} else {
1527 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1528 					  ctype_string, m);
1529 	}
1530 done:
1531 	kfree(cmd_buf);
1532 	return ret ? ret : count;
1533 }
1534 
1535 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
1536 					const char __user *buffer,
1537 					size_t count, loff_t *ppos)
1538 {
1539 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1540 					    NIX_AQ_CTYPE_SQ);
1541 }
1542 
1543 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
1544 {
1545 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
1546 }
1547 
1548 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1549 
1550 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
1551 					const char __user *buffer,
1552 					size_t count, loff_t *ppos)
1553 {
1554 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1555 					    NIX_AQ_CTYPE_RQ);
1556 }
1557 
1558 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
1559 {
1560 	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
1561 }
1562 
1563 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1564 
1565 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
1566 					const char __user *buffer,
1567 					size_t count, loff_t *ppos)
1568 {
1569 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1570 					    NIX_AQ_CTYPE_CQ);
1571 }
1572 
1573 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1574 {
1575 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1576 }
1577 
1578 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1579 
1580 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1581 				 unsigned long *bmap, char *qtype)
1582 {
1583 	char *buf;
1584 
1585 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1586 	if (!buf)
1587 		return;
1588 
1589 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1590 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1591 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1592 		   qtype, buf);
1593 	kfree(buf);
1594 }
1595 
1596 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1597 {
1598 	if (!pfvf->cq_ctx)
1599 		seq_puts(filp, "cq context is not initialized\n");
1600 	else
1601 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1602 				     "cq");
1603 
1604 	if (!pfvf->rq_ctx)
1605 		seq_puts(filp, "rq context is not initialized\n");
1606 	else
1607 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1608 				     "rq");
1609 
1610 	if (!pfvf->sq_ctx)
1611 		seq_puts(filp, "sq context is not initialized\n");
1612 	else
1613 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1614 				     "sq");
1615 }
1616 
1617 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1618 				       const char __user *buffer,
1619 				       size_t count, loff_t *ppos)
1620 {
1621 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1622 				   BLKTYPE_NIX);
1623 }
1624 
1625 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1626 {
1627 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1628 }
1629 
1630 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1631 
1632 static void print_band_prof_ctx(struct seq_file *m,
1633 				struct nix_bandprof_s *prof)
1634 {
1635 	char *str;
1636 
1637 	switch (prof->pc_mode) {
1638 	case NIX_RX_PC_MODE_VLAN:
1639 		str = "VLAN";
1640 		break;
1641 	case NIX_RX_PC_MODE_DSCP:
1642 		str = "DSCP";
1643 		break;
1644 	case NIX_RX_PC_MODE_GEN:
1645 		str = "Generic";
1646 		break;
1647 	case NIX_RX_PC_MODE_RSVD:
1648 		str = "Reserved";
1649 		break;
1650 	}
1651 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1652 	str = (prof->icolor == 3) ? "Color blind" :
1653 		(prof->icolor == 0) ? "Green" :
1654 		(prof->icolor == 1) ? "Yellow" : "Red";
1655 	seq_printf(m, "W0: icolor\t\t%s\n", str);
1656 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1657 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1658 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1659 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1660 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1661 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1662 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1663 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1664 
1665 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1666 	str = (prof->lmode == 0) ? "byte" : "packet";
1667 	seq_printf(m, "W1: lmode\t\t%s\n", str);
1668 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1669 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1670 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1671 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1672 	str = (prof->gc_action == 0) ? "PASS" :
1673 		(prof->gc_action == 1) ? "DROP" : "RED";
1674 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
1675 	str = (prof->yc_action == 0) ? "PASS" :
1676 		(prof->yc_action == 1) ? "DROP" : "RED";
1677 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
1678 	str = (prof->rc_action == 0) ? "PASS" :
1679 		(prof->rc_action == 1) ? "DROP" : "RED";
1680 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
1681 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1682 	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1683 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1684 
1685 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1686 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1687 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1688 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1689 		   (u64)prof->green_pkt_pass);
1690 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1691 		   (u64)prof->yellow_pkt_pass);
1692 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1693 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
1694 		   (u64)prof->green_octs_pass);
1695 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1696 		   (u64)prof->yellow_octs_pass);
1697 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1698 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1699 		   (u64)prof->green_pkt_drop);
1700 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1701 		   (u64)prof->yellow_pkt_drop);
1702 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1703 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
1704 		   (u64)prof->green_octs_drop);
1705 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1706 		   (u64)prof->yellow_octs_drop);
1707 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1708 	seq_puts(m, "==============================\n");
1709 }
1710 
/* debugfs read handler for "ingress_policer_ctx": walks every bandwidth
 * profile layer (leaf/mid/top) and, for each allocated profile, fetches
 * its context over the NIX admin queue and dumps it along with the
 * owning PF/VF. Stops early (returning 0 so the partial dump is kept)
 * if a context fetch fails.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only dump profiles currently allocated to someone */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* Profile index encoding: bits 13:0 index, 15:14 layer */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* FUNC bits clear => owned by a PF, else by a VF */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}
1761 
1762 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1763 
1764 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1765 {
1766 	struct nix_hw *nix_hw = m->private;
1767 	struct nix_ipolicer *ipolicer;
1768 	int layer;
1769 	char *str;
1770 
1771 	seq_puts(m, "\nBandwidth profile resource free count\n");
1772 	seq_puts(m, "=====================================\n");
1773 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1774 		if (layer == BAND_PROF_INVAL_LAYER)
1775 			continue;
1776 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1777 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1778 
1779 		ipolicer = &nix_hw->ipolicer[layer];
1780 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1781 			   ipolicer->band_prof.max,
1782 			   rvu_rsrc_free_count(&ipolicer->band_prof));
1783 	}
1784 	seq_puts(m, "=====================================\n");
1785 
1786 	return 0;
1787 }
1788 
1789 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1790 
/* Create the "nix"/"nix1" debugfs directory for one NIX block and populate
 * it with queue-context, NDC-cache and ingress-policer dump files.
 */
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
	struct nix_hw *nix_hw;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	if (blkaddr == BLKADDR_NIX0) {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[0];
	} else {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
						      rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[1];
	}

	/* Most files take the per-block nix_hw as private data; "qsize"
	 * takes the whole-AF rvu handle instead.
	 */
	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_sq_ctx_fops);
	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_rq_ctx_fops);
	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_cq_ctx_fops);
	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_cache_fops);
	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_cache_fops);
	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
			    &rvu_dbg_nix_qsize_fops);
	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_ctx_fops);
	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_rsrc_fops);
}
1828 
1829 static void rvu_dbg_npa_init(struct rvu *rvu)
1830 {
1831 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1832 
1833 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1834 			    &rvu_dbg_npa_qsize_fops);
1835 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1836 			    &rvu_dbg_npa_aura_ctx_fops);
1837 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1838 			    &rvu_dbg_npa_pool_ctx_fops);
1839 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1840 			    &rvu_dbg_npa_ndc_cache_fops);
1841 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
1842 			    &rvu_dbg_npa_ndc_hits_miss_fops);
1843 }
1844 
/* Read one cumulative NIX RX stat for this port and, on success, print it
 * to the seq_file. Expands inside cgx_print_stats() and relies on 'rvu',
 * 'cgxd', 'lmac_id', 's' and 'err' from the enclosing scope. Evaluates to
 * 'cnt'; NOTE(review): 'cnt' is yielded even when the read failed, so the
 * caller must check 'err' before trusting the value.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1854 
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: reads one cumulative NIX
 * TX stat, prints it on success, and evaluates to 'cnt'. Same scope
 * requirements and same caveat: check 'err' before using the value.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1864 
1865 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1866 {
1867 	struct cgx_link_user_info linfo;
1868 	struct mac_ops *mac_ops;
1869 	void *cgxd = s->private;
1870 	u64 ucast, mcast, bcast;
1871 	int stat = 0, err = 0;
1872 	u64 tx_stat, rx_stat;
1873 	struct rvu *rvu;
1874 
1875 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1876 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1877 	if (!rvu)
1878 		return -ENODEV;
1879 
1880 	mac_ops = get_mac_ops(cgxd);
1881 
1882 	if (!mac_ops)
1883 		return 0;
1884 
1885 	/* Link status */
1886 	seq_puts(s, "\n=======Link Status======\n\n");
1887 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1888 	if (err)
1889 		seq_puts(s, "Failed to read link status\n");
1890 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
1891 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
1892 
1893 	/* Rx stats */
1894 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1895 		   mac_ops->name);
1896 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1897 	if (err)
1898 		return err;
1899 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1900 	if (err)
1901 		return err;
1902 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1903 	if (err)
1904 		return err;
1905 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1906 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1907 	if (err)
1908 		return err;
1909 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1910 	if (err)
1911 		return err;
1912 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1913 	if (err)
1914 		return err;
1915 
1916 	/* Tx stats */
1917 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
1918 		   mac_ops->name);
1919 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1920 	if (err)
1921 		return err;
1922 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1923 	if (err)
1924 		return err;
1925 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1926 	if (err)
1927 		return err;
1928 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1929 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1930 	if (err)
1931 		return err;
1932 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1933 	if (err)
1934 		return err;
1935 
1936 	/* Rx stats */
1937 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
1938 	while (stat < mac_ops->rx_stats_cnt) {
1939 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1940 		if (err)
1941 			return err;
1942 		if (is_rvu_otx2(rvu))
1943 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
1944 				   rx_stat);
1945 		else
1946 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
1947 				   rx_stat);
1948 		stat++;
1949 	}
1950 
1951 	/* Tx stats */
1952 	stat = 0;
1953 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
1954 	while (stat < mac_ops->tx_stats_cnt) {
1955 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1956 		if (err)
1957 			return err;
1958 
1959 	if (is_rvu_otx2(rvu))
1960 		seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
1961 			   tx_stat);
1962 	else
1963 		seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
1964 			   tx_stat);
1965 	stat++;
1966 	}
1967 
1968 	return err;
1969 }
1970 
1971 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
1972 {
1973 	struct dentry *current_dir;
1974 	char *buf;
1975 
1976 	current_dir = filp->file->f_path.dentry->d_parent;
1977 	buf = strrchr(current_dir->d_name.name, 'c');
1978 	if (!buf)
1979 		return -EINVAL;
1980 
1981 	return kstrtoint(buf + 1, 10, lmac_id);
1982 }
1983 
/* debugfs "stats" show handler: resolve the LMAC id and dump its stats. */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1996 
1997 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
1998 {
1999 	struct pci_dev *pdev = NULL;
2000 	void *cgxd = s->private;
2001 	char *bcast, *mcast;
2002 	u16 index, domain;
2003 	u8 dmac[ETH_ALEN];
2004 	struct rvu *rvu;
2005 	u64 cfg, mac;
2006 	int pf;
2007 
2008 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2009 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2010 	if (!rvu)
2011 		return -ENODEV;
2012 
2013 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2014 	domain = 2;
2015 
2016 	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2017 	if (!pdev)
2018 		return 0;
2019 
2020 	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2021 	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2022 	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2023 
2024 	seq_puts(s,
2025 		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2026 	seq_printf(s, "%s  PF%d  %9s  %9s",
2027 		   dev_name(&pdev->dev), pf, bcast, mcast);
2028 	if (cfg & CGX_DMAC_CAM_ACCEPT)
2029 		seq_printf(s, "%12s\n\n", "UNICAST");
2030 	else
2031 		seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2032 
2033 	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2034 
2035 	for (index = 0 ; index < 32 ; index++) {
2036 		cfg = cgx_read_dmac_entry(cgxd, index);
2037 		/* Display enabled dmac entries associated with current lmac */
2038 		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2039 		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2040 			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2041 			u64_to_ether_addr(mac, dmac);
2042 			seq_printf(s, "%7d     %pM\n", index, dmac);
2043 		}
2044 	}
2045 
2046 	return 0;
2047 }
2048 
/* debugfs "mac_filter" show handler: resolve the LMAC id and dump its
 * DMAC filter configuration.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2061 
/* Create per-MAC and per-LMAC debugfs directories (named after
 * mac_ops->name) with "stats" and "mac_filter" files for each LMAC.
 */
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	unsigned long lmac_bmap;
	int i, lmac_id;
	char dname[20];
	void *cgx;

	/* Nothing to expose when no CGX/RPM blocks are present */
	if (!cgx_get_cgxcnt_max())
		return;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	if (!mac_ops)
		return;

	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
						   rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgx);
		/* cgx debugfs dir, e.g. "<mac>0", "<mac>1", ... */
		sprintf(dname, "%s%d", mac_ops->name, i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);

		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
			/* lmac debugfs dir, e.g. "lmac0" */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			/* Both files take the cgx handle as private data;
			 * the LMAC id is re-derived from the dir name.
			 */
			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
					    cgx, &rvu_dbg_cgx_stat_fops);
			debugfs_create_file("mac_filter", 0600,
					    rvu->rvu_dbg.lmac, cgx,
					    &rvu_dbg_cgx_dmac_flt_fops);
		}
	}
}
2104 
2105 /* NPC debugfs APIs */
2106 static void rvu_print_npc_mcam_info(struct seq_file *s,
2107 				    u16 pcifunc, int blkaddr)
2108 {
2109 	struct rvu *rvu = s->private;
2110 	int entry_acnt, entry_ecnt;
2111 	int cntr_acnt, cntr_ecnt;
2112 
2113 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2114 					  &entry_acnt, &entry_ecnt);
2115 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2116 					    &cntr_acnt, &cntr_ecnt);
2117 	if (!entry_acnt && !cntr_acnt)
2118 		return;
2119 
2120 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2121 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2122 			   rvu_get_pf(pcifunc));
2123 	else
2124 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2125 			   rvu_get_pf(pcifunc),
2126 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2127 
2128 	if (entry_acnt) {
2129 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2130 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2131 	}
2132 	if (cntr_acnt) {
2133 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2134 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2135 	}
2136 }
2137 
2138 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2139 {
2140 	struct rvu *rvu = filp->private;
2141 	int pf, vf, numvfs, blkaddr;
2142 	struct npc_mcam *mcam;
2143 	u16 pcifunc, counters;
2144 	u64 cfg;
2145 
2146 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2147 	if (blkaddr < 0)
2148 		return -ENODEV;
2149 
2150 	mcam = &rvu->hw->mcam;
2151 	counters = rvu->hw->npc_counters;
2152 
2153 	seq_puts(filp, "\nNPC MCAM info:\n");
2154 	/* MCAM keywidth on receive and transmit sides */
2155 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2156 	cfg = (cfg >> 32) & 0x07;
2157 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2158 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2159 		   "224bits" : "448bits"));
2160 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2161 	cfg = (cfg >> 32) & 0x07;
2162 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2163 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2164 		   "224bits" : "448bits"));
2165 
2166 	mutex_lock(&mcam->lock);
2167 	/* MCAM entries */
2168 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2169 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2170 		   mcam->total_entries - mcam->bmap_entries);
2171 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2172 
2173 	/* MCAM counters */
2174 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2175 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2176 		   counters - mcam->counters.max);
2177 	seq_printf(filp, "\t\t Available \t: %d\n",
2178 		   rvu_rsrc_free_count(&mcam->counters));
2179 
2180 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
2181 		mutex_unlock(&mcam->lock);
2182 		return 0;
2183 	}
2184 
2185 	seq_puts(filp, "\n\t\t Current allocation\n");
2186 	seq_puts(filp, "\t\t====================\n");
2187 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2188 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2189 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2190 
2191 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2192 		numvfs = (cfg >> 12) & 0xFF;
2193 		for (vf = 0; vf < numvfs; vf++) {
2194 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2195 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2196 		}
2197 	}
2198 
2199 	mutex_unlock(&mcam->lock);
2200 	return 0;
2201 }
2202 
2203 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2204 
2205 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2206 					     void *unused)
2207 {
2208 	struct rvu *rvu = filp->private;
2209 	struct npc_mcam *mcam;
2210 	int blkaddr;
2211 
2212 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2213 	if (blkaddr < 0)
2214 		return -ENODEV;
2215 
2216 	mcam = &rvu->hw->mcam;
2217 
2218 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2219 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2220 		   rvu_read64(rvu, blkaddr,
2221 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2222 
2223 	return 0;
2224 }
2225 
2226 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2227 
/* Print every match field set in the rule's feature bitmap, together with
 * its value and mask. Multi-byte fields are stored in network byte order,
 * hence the ntohs() conversions.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		/* L4 ports share packet.sport/dport across protocols */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* Field name was already printed; just end the line */
			seq_puts(s, "\n");
			break;
		}
	}
}
2291 
/* Print the rule's action in human-readable form, decoding the TX or RX
 * action opcode depending on the rule's interface direction. Unknown
 * opcodes print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
2339 
/* Map an NPC interface id to a printable label. */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2357 
/* Walk the software list of installed MCAM rules (under mcam->lock) and
 * print owner, direction, match fields, action, enable state and, when a
 * counter is attached, its hit count.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		/* Owner pcifunc encodes PF in the upper bits, VF+1 below */
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forwarding target pcifunc */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2423 
2424 static void rvu_dbg_npc_init(struct rvu *rvu)
2425 {
2426 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2427 
2428 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2429 			    &rvu_dbg_npc_mcam_info_fops);
2430 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2431 			    &rvu_dbg_npc_mcam_rules_fops);
2432 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2433 			    &rvu_dbg_npc_rx_miss_act_fops);
2434 }
2435 
/* Print free/busy bitmaps for one CPT engine type (SE/IE/AE).
 * NOTE(review): busy_sts/free_sts are single u64 bitmaps, so this assumes
 * at most 64 engines of any one type — confirm against CPT_AF_CONSTANTS1
 * limits on the targeted silicon.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	/* CPT_AF_CONSTANTS1 packs SE/IE/AE engine counts in 16-bit fields */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Engines are numbered SE first, then IE, then AE */
	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)	/* bit 0 -> busy bitmap */
			busy_sts |= 1ULL << i;

		if (reg & 0x2)	/* bit 1 -> free bitmap */
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
2481 
/* debugfs "cpt_ae_sts": free/busy bitmaps for AE-type engines */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2488 
/* debugfs "cpt_se_sts": free/busy bitmaps for SE-type engines */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2495 
/* debugfs "cpt_ie_sts": free/busy bitmaps for IE-type engines */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2502 
2503 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2504 {
2505 	struct cpt_ctx *ctx = filp->private;
2506 	u16 max_ses, max_ies, max_aes;
2507 	struct rvu *rvu = ctx->rvu;
2508 	int blkaddr = ctx->blkaddr;
2509 	u32 e_max, e;
2510 	u64 reg;
2511 
2512 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2513 	max_ses = reg & 0xffff;
2514 	max_ies = (reg >> 16) & 0xffff;
2515 	max_aes = (reg >> 32) & 0xffff;
2516 
2517 	e_max = max_ses + max_ies + max_aes;
2518 
2519 	seq_puts(filp, "===========================================\n");
2520 	for (e = 0; e < e_max; e++) {
2521 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2522 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2523 			   reg & 0xff);
2524 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2525 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2526 			   reg);
2527 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2528 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2529 			   reg);
2530 		seq_puts(filp, "===========================================\n");
2531 	}
2532 	return 0;
2533 }
2534 
2535 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2536 
/* Dump per-LF control/config registers for every LF of the CPT block. */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	/* No LF bitmap means this block has no LFs set up to report */
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
		/* Generic per-LF config register at block->lfcfg_reg */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				(lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2569 
/* Dump the CPT fault/error interrupt and error-info registers. */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2596 
2597 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2598 {
2599 	struct cpt_ctx *ctx = filp->private;
2600 	struct rvu *rvu = ctx->rvu;
2601 	int blkaddr = ctx->blkaddr;
2602 	u64 reg;
2603 
2604 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2605 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2606 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2607 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2608 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2609 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2610 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2611 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2612 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2613 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2614 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2615 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2616 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2617 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2618 
2619 	return 0;
2620 }
2621 
2622 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2623 
2624 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2625 {
2626 	struct cpt_ctx *ctx;
2627 
2628 	if (!is_block_implemented(rvu->hw, blkaddr))
2629 		return;
2630 
2631 	if (blkaddr == BLKADDR_CPT0) {
2632 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2633 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
2634 		ctx->blkaddr = BLKADDR_CPT0;
2635 		ctx->rvu = rvu;
2636 	} else {
2637 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2638 						      rvu->rvu_dbg.root);
2639 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
2640 		ctx->blkaddr = BLKADDR_CPT1;
2641 		ctx->rvu = rvu;
2642 	}
2643 
2644 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2645 			    &rvu_dbg_cpt_pc_fops);
2646 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2647 			    &rvu_dbg_cpt_ae_sts_fops);
2648 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2649 			    &rvu_dbg_cpt_se_sts_fops);
2650 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2651 			    &rvu_dbg_cpt_ie_sts_fops);
2652 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2653 			    &rvu_dbg_cpt_engines_info_fops);
2654 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2655 			    &rvu_dbg_cpt_lfs_info_fops);
2656 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2657 			    &rvu_dbg_cpt_err_info_fops);
2658 }
2659 
/* Root debugfs dir name: "octeontx2" on OcteonTX2 silicon, else "cn10k". */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
2667 
/* Entry point: build the whole debugfs hierarchy for the RVU AF driver. */
void rvu_dbg_init(struct rvu *rvu)
{
	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);

	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
			    &rvu_dbg_rsrc_status_fops);

	/* The PF-to-MAC map only exists when CGX/RPM blocks are present */
	if (!cgx_get_cgxcnt_max())
		goto create;

	/* Same fops either way; only the file name reflects the MAC type */
	if (is_rvu_otx2(rvu))
		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
	else
		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);

create:
	rvu_dbg_npa_init(rvu);
	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);

	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
	rvu_dbg_cgx_init(rvu);
	rvu_dbg_npc_init(rvu);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
}
2695 
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2700 
2701 #endif /* CONFIG_DEBUG_FS */
2702