1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #ifdef CONFIG_DEBUG_FS
12 
13 #include <linux/fs.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 
18 #include "rvu_struct.h"
19 #include "rvu_reg.h"
20 #include "rvu.h"
21 #include "cgx.h"
22 #include "lmac_common.h"
23 #include "npc.h"
24 
25 #define DEBUGFS_DIR_NAME "octeontx2"
26 
/* Generic indices for per-LMAC hardware statistics counters. CGX_STATn
 * selects the n'th counter; the human-readable meaning of each index is
 * given by the cgx_rx_stats_fields[] / cgx_tx_stats_fields[] tables below.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
48 
/* NIX TX stats: per-LF transmit statistics register indices. */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,	/* unicast packets */
	TX_BCAST	= 0x1,	/* broadcast packets */
	TX_MCAST	= 0x2,	/* multicast packets */
	TX_DROP		= 0x3,	/* dropped packets */
	TX_OCTS		= 0x4,	/* total octets */
	TX_STATS_ENUM_LAST,	/* number of TX stats */
};
58 
/* NIX RX stats: per-LF receive statistics register indices. */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,	/* total octets */
	RX_UCAST	= 0x1,	/* unicast packets */
	RX_BCAST	= 0x2,	/* broadcast packets */
	RX_MCAST	= 0x3,	/* multicast packets */
	RX_DROP		= 0x4,	/* dropped packets */
	RX_DROP_OCTS	= 0x5,	/* octets of dropped packets */
	RX_FCS		= 0x6,	/* FCS error packets */
	RX_ERR		= 0x7,	/* error packets */
	RX_DRP_BCAST	= 0x8,	/* dropped broadcast packets */
	RX_DRP_MCAST	= 0x9,	/* dropped multicast packets */
	RX_DRP_L3BCAST	= 0xa,	/* dropped L3 broadcast packets */
	RX_DRP_L3MCAST	= 0xb,	/* dropped L3 multicast packets */
	RX_STATS_ENUM_LAST,	/* number of RX stats */
};
75 
/* User-visible names for the CGX LMAC RX counters, indexed by CGX_STATn.
 * These strings are emitted verbatim into debugfs output.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
91 
/* User-visible names for the CGX LMAC TX counters, indexed by CGX_STATn. */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65–127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
112 
/* User-visible names for the RPM (CN10K MAC) RX counters, in hardware
 * counter order. Emitted verbatim into debugfs output, so the text
 * (including its existing spelling) must not be altered without also
 * updating any tooling that parses it.
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets with out error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with a1nrange length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65–127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
158 
/* User-visible names for the RPM (CN10K MAC) TX counters, in hardware
 * counter order.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65–127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
195 
/* CPT (crypto accelerator) engine categories. */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,	/* asymmetric engine */
	CPT_SE_TYPE = 2,	/* symmetric engine */
	CPT_IE_TYPE = 3,	/* IPsec engine */
};
201 
/* Number of banks implemented by an NDC block, from its NDC_AF_CONST CSR. */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Token-pasting placeholders so the FOPS macros below accept "NULL" for
 * an absent read or write handler.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare file_operations for a seq_file based debugfs entry: reads go
 * through single_open() on rvu_dbg_<read_op>; writes (if any) go to
 * rvu_dbg_<write_op>.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare file_operations for a raw (non seq_file) debugfs entry. */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

/* Defined later in this file; needed by rvu_dbg_qsize_display(). */
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
231 
232 /* Dumps current provisioning status of all RVU block LFs */
233 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
234 					  char __user *buffer,
235 					  size_t count, loff_t *ppos)
236 {
237 	int index, off = 0, flag = 0, go_back = 0, off_prev;
238 	struct rvu *rvu = filp->private_data;
239 	int lf, pf, vf, pcifunc;
240 	struct rvu_block block;
241 	int bytes_not_copied;
242 	int buf_size = 2048;
243 	char *buf;
244 
245 	/* don't allow partial reads */
246 	if (*ppos != 0)
247 		return 0;
248 
249 	buf = kzalloc(buf_size, GFP_KERNEL);
250 	if (!buf)
251 		return -ENOSPC;
252 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
253 	for (index = 0; index < BLK_COUNT; index++)
254 		if (strlen(rvu->hw->block[index].name))
255 			off +=	scnprintf(&buf[off], buf_size - 1 - off,
256 					  "%*s\t", (index - 1) * 2,
257 					  rvu->hw->block[index].name);
258 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
259 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
260 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
261 			pcifunc = pf << 10 | vf;
262 			if (!pcifunc)
263 				continue;
264 
265 			if (vf) {
266 				go_back = scnprintf(&buf[off],
267 						    buf_size - 1 - off,
268 						    "PF%d:VF%d\t\t", pf,
269 						    vf - 1);
270 			} else {
271 				go_back = scnprintf(&buf[off],
272 						    buf_size - 1 - off,
273 						    "PF%d\t\t", pf);
274 			}
275 
276 			off += go_back;
277 			for (index = 0; index < BLKTYPE_MAX; index++) {
278 				block = rvu->hw->block[index];
279 				if (!strlen(block.name))
280 					continue;
281 				off_prev = off;
282 				for (lf = 0; lf < block.lf.max; lf++) {
283 					if (block.fn_map[lf] != pcifunc)
284 						continue;
285 					flag = 1;
286 					off += scnprintf(&buf[off], buf_size - 1
287 							- off, "%3d,", lf);
288 				}
289 				if (flag && off_prev != off)
290 					off--;
291 				else
292 					go_back++;
293 				off += scnprintf(&buf[off], buf_size - 1 - off,
294 						"\t");
295 			}
296 			if (!flag)
297 				off -= go_back;
298 			else
299 				flag = 0;
300 			off--;
301 			off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
302 		}
303 	}
304 
305 	bytes_not_copied = copy_to_user(buffer, buf, off);
306 	kfree(buf);
307 
308 	if (bytes_not_copied)
309 		return -EFAULT;
310 
311 	*ppos = off;
312 	return off;
313 }
314 
315 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
316 
317 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
318 {
319 	struct rvu *rvu = filp->private;
320 	struct pci_dev *pdev = NULL;
321 	struct mac_ops *mac_ops;
322 	int rvu_def_cgx_id = 0;
323 	char cgx[10], lmac[10];
324 	struct rvu_pfvf *pfvf;
325 	int pf, domain, blkid;
326 	u8 cgx_id, lmac_id;
327 	u16 pcifunc;
328 
329 	domain = 2;
330 	mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
331 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
332 		   mac_ops->name);
333 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
334 		if (!is_pf_cgxmapped(rvu, pf))
335 			continue;
336 
337 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
338 		if (!pdev)
339 			continue;
340 
341 		cgx[0] = 0;
342 		lmac[0] = 0;
343 		pcifunc = pf << 10;
344 		pfvf = rvu_get_pfvf(rvu, pcifunc);
345 
346 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
347 			blkid = 0;
348 		else
349 			blkid = 1;
350 
351 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
352 				    &lmac_id);
353 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
354 		sprintf(lmac, "LMAC%d", lmac_id);
355 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
356 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
357 	}
358 	return 0;
359 }
360 
361 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
362 
363 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
364 				u16 *pcifunc)
365 {
366 	struct rvu_block *block;
367 	struct rvu_hwinfo *hw;
368 
369 	hw = rvu->hw;
370 	block = &hw->block[blkaddr];
371 
372 	if (lf < 0 || lf >= block->lf.max) {
373 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
374 			 block->lf.max - 1);
375 		return false;
376 	}
377 
378 	*pcifunc = block->fn_map[lf];
379 	if (!*pcifunc) {
380 		dev_warn(rvu->dev,
381 			 "This LF is not attached to any RVU PFFUNC\n");
382 		return false;
383 	}
384 	return true;
385 }
386 
387 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
388 {
389 	char *buf;
390 
391 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
392 	if (!buf)
393 		return;
394 
395 	if (!pfvf->aura_ctx) {
396 		seq_puts(m, "Aura context is not initialized\n");
397 	} else {
398 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
399 					pfvf->aura_ctx->qsize);
400 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
401 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
402 	}
403 
404 	if (!pfvf->pool_ctx) {
405 		seq_puts(m, "Pool context is not initialized\n");
406 	} else {
407 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
408 					pfvf->pool_ctx->qsize);
409 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
410 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
411 	}
412 	kfree(buf);
413 }
414 
415 /* The 'qsize' entry dumps current Aura/Pool context Qsize
416  * and each context's current enable/disable status in a bitmap.
417  */
418 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
419 				 int blktype)
420 {
421 	void (*print_qsize)(struct seq_file *filp,
422 			    struct rvu_pfvf *pfvf) = NULL;
423 	struct dentry *current_dir;
424 	struct rvu_pfvf *pfvf;
425 	struct rvu *rvu;
426 	int qsize_id;
427 	u16 pcifunc;
428 	int blkaddr;
429 
430 	rvu = filp->private;
431 	switch (blktype) {
432 	case BLKTYPE_NPA:
433 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
434 		print_qsize = print_npa_qsize;
435 		break;
436 
437 	case BLKTYPE_NIX:
438 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
439 		print_qsize = print_nix_qsize;
440 		break;
441 
442 	default:
443 		return -EINVAL;
444 	}
445 
446 	if (blktype == BLKTYPE_NPA) {
447 		blkaddr = BLKADDR_NPA;
448 	} else {
449 		current_dir = filp->file->f_path.dentry->d_parent;
450 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
451 				   BLKADDR_NIX1 : BLKADDR_NIX0);
452 	}
453 
454 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
455 		return -EINVAL;
456 
457 	pfvf = rvu_get_pfvf(rvu, pcifunc);
458 	print_qsize(filp, pfvf);
459 
460 	return 0;
461 }
462 
463 static ssize_t rvu_dbg_qsize_write(struct file *filp,
464 				   const char __user *buffer, size_t count,
465 				   loff_t *ppos, int blktype)
466 {
467 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
468 	struct seq_file *seqfile = filp->private_data;
469 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
470 	struct rvu *rvu = seqfile->private;
471 	struct dentry *current_dir;
472 	int blkaddr;
473 	u16 pcifunc;
474 	int ret, lf;
475 
476 	cmd_buf = memdup_user(buffer, count + 1);
477 	if (IS_ERR(cmd_buf))
478 		return -ENOMEM;
479 
480 	cmd_buf[count] = '\0';
481 
482 	cmd_buf_tmp = strchr(cmd_buf, '\n');
483 	if (cmd_buf_tmp) {
484 		*cmd_buf_tmp = '\0';
485 		count = cmd_buf_tmp - cmd_buf + 1;
486 	}
487 
488 	cmd_buf_tmp = cmd_buf;
489 	subtoken = strsep(&cmd_buf, " ");
490 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
491 	if (cmd_buf)
492 		ret = -EINVAL;
493 
494 	if (!strncmp(subtoken, "help", 4) || ret < 0) {
495 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
496 		goto qsize_write_done;
497 	}
498 
499 	if (blktype == BLKTYPE_NPA) {
500 		blkaddr = BLKADDR_NPA;
501 	} else {
502 		current_dir = filp->f_path.dentry->d_parent;
503 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
504 				   BLKADDR_NIX1 : BLKADDR_NIX0);
505 	}
506 
507 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
508 		ret = -EINVAL;
509 		goto qsize_write_done;
510 	}
511 	if (blktype  == BLKTYPE_NPA)
512 		rvu->rvu_dbg.npa_qsize_id = lf;
513 	else
514 		rvu->rvu_dbg.nix_qsize_id = lf;
515 
516 qsize_write_done:
517 	kfree(cmd_buf_tmp);
518 	return ret ? ret : count;
519 }
520 
/* debugfs 'qsize' write for the NPA block: forwards to the common
 * qsize write logic with BLKTYPE_NPA.
 */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
					    BLKTYPE_NPA);
}
528 
/* debugfs 'qsize' read for the NPA block. */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
535 
/* Dumps given NPA Aura's context, word by word (W0..W6), from an admin
 * queue read response.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is only printed on silicon newer than OcteonTx2 —
	 * presumably the field doesn't exist on OTx2; confirm against
	 * the aura context layout for each chip revision.
	 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst: same newer-silicon-only gating as fc_be above */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
583 
/* Dumps given NPA Pool's context, word by word (W0..W8), from an admin
 * queue read response.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be / fc_msh_dst printed only on silicon newer than OcteonTx2
	 * (same gating as in print_npa_aura_ctx())
	 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
630 
/* Reads aura/pool's ctx from admin queue and dumps it.
 * The NPA LF, context id and 'all' flag were stored by a prior write to
 * the companion debugfs file (see write_npa_ctx()). When 'all' is set,
 * every context from 0 to qsize-1 is dumped; otherwise only the stored id.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the selection recorded by the matching write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build the AQ READ request once; only aura_id varies per loop */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* 'all' dumps [0, qsize); otherwise dump exactly one context */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
711 
712 static int write_npa_ctx(struct rvu *rvu, bool all,
713 			 int npalf, int id, int ctype)
714 {
715 	struct rvu_pfvf *pfvf;
716 	int max_id = 0;
717 	u16 pcifunc;
718 
719 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
720 		return -EINVAL;
721 
722 	pfvf = rvu_get_pfvf(rvu, pcifunc);
723 
724 	if (ctype == NPA_AQ_CTYPE_AURA) {
725 		if (!pfvf->aura_ctx) {
726 			dev_warn(rvu->dev, "Aura context is not initialized\n");
727 			return -EINVAL;
728 		}
729 		max_id = pfvf->aura_ctx->qsize;
730 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
731 		if (!pfvf->pool_ctx) {
732 			dev_warn(rvu->dev, "Pool context is not initialized\n");
733 			return -EINVAL;
734 		}
735 		max_id = pfvf->pool_ctx->qsize;
736 	}
737 
738 	if (id < 0 || id >= max_id) {
739 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
740 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
741 			max_id - 1);
742 		return -EINVAL;
743 	}
744 
745 	switch (ctype) {
746 	case NPA_AQ_CTYPE_AURA:
747 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
748 		rvu->rvu_dbg.npa_aura_ctx.id = id;
749 		rvu->rvu_dbg.npa_aura_ctx.all = all;
750 		break;
751 
752 	case NPA_AQ_CTYPE_POOL:
753 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
754 		rvu->rvu_dbg.npa_pool_ctx.id = id;
755 		rvu->rvu_dbg.npa_pool_ctx.all = all;
756 		break;
757 	default:
758 		return -EINVAL;
759 	}
760 	return 0;
761 }
762 
/* Parse a "<npalf> [<id>|all]" command written to an NPA ctx debugfs file.
 * Copies *count bytes from @buffer into @cmd_buf — the caller must have
 * allocated at least *count + 1 bytes, since a NUL is stored at
 * cmd_buf[*count] — strips a trailing newline (updating *count), then
 * tokenizes with strsep(). Fills *npalf and either *id or *all.
 * Returns 0 on success, negative errno on copy failure or bad input
 * (including trailing tokens after the id).
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Optional second token: a context id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* Anything left over after the second token is an error */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
800 
801 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
802 				     const char __user *buffer,
803 				     size_t count, loff_t *ppos, int ctype)
804 {
805 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
806 					"aura" : "pool";
807 	struct seq_file *seqfp = filp->private_data;
808 	struct rvu *rvu = seqfp->private;
809 	int npalf, id = 0, ret;
810 	bool all = false;
811 
812 	if ((*ppos != 0) || !count)
813 		return -EINVAL;
814 
815 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
816 	if (!cmd_buf)
817 		return count;
818 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
819 				   &npalf, &id, &all);
820 	if (ret < 0) {
821 		dev_info(rvu->dev,
822 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
823 			 ctype_string, ctype_string);
824 		goto done;
825 	} else {
826 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
827 	}
828 done:
829 	kfree(cmd_buf);
830 	return ret ? ret : count;
831 }
832 
/* debugfs 'npa_aura_ctx' write: select which aura context(s) to dump. */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}
840 
/* debugfs 'npa_aura_ctx' read: dump the previously selected aura ctx. */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
847 
/* debugfs 'npa_pool_ctx' write: select which pool context(s) to dump. */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}
855 
/* debugfs 'npa_pool_ctx' read: dump the previously selected pool ctx. */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
862 
863 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
864 			    int ctype, int transaction)
865 {
866 	u64 req, out_req, lat, cant_alloc;
867 	struct nix_hw *nix_hw;
868 	struct rvu *rvu;
869 	int port;
870 
871 	if (blk_addr == BLKADDR_NDC_NPA0) {
872 		rvu = s->private;
873 	} else {
874 		nix_hw = s->private;
875 		rvu = nix_hw->rvu;
876 	}
877 
878 	for (port = 0; port < NDC_MAX_PORT; port++) {
879 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
880 						(port, ctype, transaction));
881 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
882 						(port, ctype, transaction));
883 		out_req = rvu_read64(rvu, blk_addr,
884 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
885 				     (port, ctype, transaction));
886 		cant_alloc = rvu_read64(rvu, blk_addr,
887 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
888 					(port, transaction));
889 		seq_printf(s, "\nPort:%d\n", port);
890 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
891 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
892 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
893 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
894 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
895 	}
896 }
897 
898 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
899 {
900 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
901 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
902 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
903 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
904 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
905 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
906 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
907 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
908 	return 0;
909 }
910 
/* debugfs: NDC cache statistics for the NPA block's NDC. */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
917 
918 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
919 {
920 	struct nix_hw *nix_hw;
921 	struct rvu *rvu;
922 	int bank, max_bank;
923 
924 	if (blk_addr == BLKADDR_NDC_NPA0) {
925 		rvu = s->private;
926 	} else {
927 		nix_hw = s->private;
928 		rvu = nix_hw->rvu;
929 	}
930 
931 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
932 	for (bank = 0; bank < max_bank; bank++) {
933 		seq_printf(s, "BANK:%d\n", bank);
934 		seq_printf(s, "\tHits:\t%lld\n",
935 			   (u64)rvu_read64(rvu, blk_addr,
936 			   NDC_AF_BANKX_HIT_PC(bank)));
937 		seq_printf(s, "\tMiss:\t%lld\n",
938 			   (u64)rvu_read64(rvu, blk_addr,
939 			    NDC_AF_BANKX_MISS_PC(bank)));
940 	}
941 	return 0;
942 }
943 
944 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
945 {
946 	struct nix_hw *nix_hw = filp->private;
947 	int blkaddr = 0;
948 	int ndc_idx = 0;
949 
950 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
951 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
952 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
953 
954 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
955 }
956 
957 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
958 
959 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
960 {
961 	struct nix_hw *nix_hw = filp->private;
962 	int blkaddr = 0;
963 	int ndc_idx = 0;
964 
965 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
966 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
967 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
968 
969 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
970 }
971 
972 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
973 
/* debugfs: per-bank hit/miss counters for the NPA block's NDC. */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
981 
/* debugfs: per-bank hit/miss counters for this NIX block's RX-side NDC. */
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	/* NOTE(review): NPA0_U looks copy-pasted from the NPA variant (the
	 * cache-stats counterpart uses NIX0_RX/NIX1_RX), but it is harmless
	 * here: ndc_blk_hits_miss_stats() ignores its idx argument.
	 */
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
996 
997 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
998 						void *unused)
999 {
1000 	struct nix_hw *nix_hw = filp->private;
1001 	int ndc_idx = NPA0_U;
1002 	int blkaddr = 0;
1003 
1004 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1005 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1006 
1007 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1008 }
1009 
1010 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1011 
1012 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1013 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1014 {
1015 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1016 		   sq_ctx->ena, sq_ctx->qint_idx);
1017 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1018 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1019 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1020 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1021 
1022 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1023 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1024 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1025 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1026 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1027 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1028 
1029 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1030 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1031 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1032 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1033 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1034 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1035 
1036 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1037 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1038 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1039 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1040 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1041 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1042 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1043 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1044 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1045 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1046 
1047 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1048 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1049 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1050 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1051 		   sq_ctx->smenq_next_sqb);
1052 
1053 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1054 
1055 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1056 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1057 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1058 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1059 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1060 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1061 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1062 
1063 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1064 		   (u64)sq_ctx->scm_lso_rem);
1065 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1066 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1067 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1068 		   (u64)sq_ctx->dropped_octs);
1069 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1070 		   (u64)sq_ctx->dropped_pkts);
1071 }
1072 
/* Dumps given nix_sq's context (W0..W15) to the seq_file.
 * On CN10K (non-OTX2) silicon the context layout differs, so the raw
 * response is reinterpreted and delegated to print_nix_cn10k_sq_ctx().
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1142 
1143 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1144 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1145 {
1146 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1147 		   rq_ctx->ena, rq_ctx->sso_ena);
1148 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1149 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1150 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1151 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1152 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1153 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1154 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1155 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1156 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1157 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1158 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1159 
1160 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1161 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1162 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1163 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1164 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1165 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1166 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1167 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1168 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1169 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1170 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1171 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1172 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1173 
1174 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1175 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1176 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1177 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1178 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1179 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1180 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1181 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1182 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1183 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1184 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1185 
1186 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1187 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1188 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1189 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1190 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1191 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1192 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1193 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1194 
1195 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1196 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1197 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1198 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1199 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1200 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1201 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1202 
1203 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1204 		   rq_ctx->ltag, rq_ctx->good_utag);
1205 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1206 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1207 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1208 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1209 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1210 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1211 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1212 
1213 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1214 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1215 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1216 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1217 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1218 }
1219 
/* Dumps given nix_rq's context (W0..W10) to the seq_file.
 * On CN10K (non-OTX2) silicon the context layout differs, so the raw
 * response is reinterpreted and delegated to print_nix_cn10k_rq_ctx().
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1288 
/* Dumps given nix_cq's context (W0..W3) to the seq_file.
 * NOTE(review): several W2/W3 labels lack a space after the colon
 * ("W2:avg_level" vs "W2: head") — output text kept as-is in case
 * userspace tooling parses these exact strings; confirm before changing.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1319 
/* Common "read" side of the sq_ctx/rq_ctx/cq_ctx debugfs files.
 * Re-reads the queue context(s) selected by a prior write (stored in
 * rvu->rvu_dbg.nix_*_ctx) via the NIX AQ mailbox and pretty-prints them.
 * Returns 0 on success, -EINVAL on bad state or AQ read failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the LF/queue selection recorded by the matching _write(). */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick per-type queue count, label and printer. */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps every queue [0, qsize); otherwise only [id, id]. */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1408 
1409 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1410 			       int id, int ctype, char *ctype_string,
1411 			       struct seq_file *m)
1412 {
1413 	struct nix_hw *nix_hw = m->private;
1414 	struct rvu_pfvf *pfvf;
1415 	int max_id = 0;
1416 	u16 pcifunc;
1417 
1418 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1419 		return -EINVAL;
1420 
1421 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1422 
1423 	if (ctype == NIX_AQ_CTYPE_SQ) {
1424 		if (!pfvf->sq_ctx) {
1425 			dev_warn(rvu->dev, "SQ context is not initialized\n");
1426 			return -EINVAL;
1427 		}
1428 		max_id = pfvf->sq_ctx->qsize;
1429 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1430 		if (!pfvf->rq_ctx) {
1431 			dev_warn(rvu->dev, "RQ context is not initialized\n");
1432 			return -EINVAL;
1433 		}
1434 		max_id = pfvf->rq_ctx->qsize;
1435 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1436 		if (!pfvf->cq_ctx) {
1437 			dev_warn(rvu->dev, "CQ context is not initialized\n");
1438 			return -EINVAL;
1439 		}
1440 		max_id = pfvf->cq_ctx->qsize;
1441 	}
1442 
1443 	if (id < 0 || id >= max_id) {
1444 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1445 			 ctype_string, max_id - 1);
1446 		return -EINVAL;
1447 	}
1448 	switch (ctype) {
1449 	case NIX_AQ_CTYPE_CQ:
1450 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1451 		rvu->rvu_dbg.nix_cq_ctx.id = id;
1452 		rvu->rvu_dbg.nix_cq_ctx.all = all;
1453 		break;
1454 
1455 	case NIX_AQ_CTYPE_SQ:
1456 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1457 		rvu->rvu_dbg.nix_sq_ctx.id = id;
1458 		rvu->rvu_dbg.nix_sq_ctx.all = all;
1459 		break;
1460 
1461 	case NIX_AQ_CTYPE_RQ:
1462 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1463 		rvu->rvu_dbg.nix_rq_ctx.id = id;
1464 		rvu->rvu_dbg.nix_rq_ctx.all = all;
1465 		break;
1466 	default:
1467 		return -EINVAL;
1468 	}
1469 	return 0;
1470 }
1471 
1472 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1473 					   const char __user *buffer,
1474 					   size_t count, loff_t *ppos,
1475 					   int ctype)
1476 {
1477 	struct seq_file *m = filp->private_data;
1478 	struct nix_hw *nix_hw = m->private;
1479 	struct rvu *rvu = nix_hw->rvu;
1480 	char *cmd_buf, *ctype_string;
1481 	int nixlf, id = 0, ret;
1482 	bool all = false;
1483 
1484 	if ((*ppos != 0) || !count)
1485 		return -EINVAL;
1486 
1487 	switch (ctype) {
1488 	case NIX_AQ_CTYPE_SQ:
1489 		ctype_string = "sq";
1490 		break;
1491 	case NIX_AQ_CTYPE_RQ:
1492 		ctype_string = "rq";
1493 		break;
1494 	case NIX_AQ_CTYPE_CQ:
1495 		ctype_string = "cq";
1496 		break;
1497 	default:
1498 		return -EINVAL;
1499 	}
1500 
1501 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1502 
1503 	if (!cmd_buf)
1504 		return count;
1505 
1506 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1507 				   &nixlf, &id, &all);
1508 	if (ret < 0) {
1509 		dev_info(rvu->dev,
1510 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1511 			 ctype_string, ctype_string);
1512 		goto done;
1513 	} else {
1514 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1515 					  ctype_string, m);
1516 	}
1517 done:
1518 	kfree(cmd_buf);
1519 	return ret ? ret : count;
1520 }
1521 
/* debugfs write handler for "sq_ctx": select which SQ context(s) to dump. */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}
1529 
/* debugfs read handler for "sq_ctx": dump the previously selected SQ(s). */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
1534 
1535 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1536 
/* debugfs write handler for "rq_ctx": select which RQ context(s) to dump. */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}
1544 
/* debugfs read handler for "rq_ctx": dump the previously selected RQ(s). */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}
1549 
1550 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1551 
/* debugfs write handler for "cq_ctx": select which CQ context(s) to dump. */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}
1559 
/* debugfs read handler for "cq_ctx": dump the previously selected CQ(s). */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
1564 
1565 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1566 
/* Print a queue type's context count and its enable/disable bitmap.
 * Best-effort: silently prints nothing if the temporary page buffer
 * cannot be allocated.
 */
static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
				 unsigned long *bmap, char *qtype)
{
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	/* Render the first 'qsize' bits of the bitmap as a range list. */
	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
		   qtype, buf);
	kfree(buf);
}
1582 
1583 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1584 {
1585 	if (!pfvf->cq_ctx)
1586 		seq_puts(filp, "cq context is not initialized\n");
1587 	else
1588 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1589 				     "cq");
1590 
1591 	if (!pfvf->rq_ctx)
1592 		seq_puts(filp, "rq context is not initialized\n");
1593 	else
1594 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1595 				     "rq");
1596 
1597 	if (!pfvf->sq_ctx)
1598 		seq_puts(filp, "sq context is not initialized\n");
1599 	else
1600 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1601 				     "sq");
1602 }
1603 
/* debugfs write handler for NIX "qsize": select the NIX LF to inspect. */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
1611 
/* debugfs read handler for NIX "qsize": dump queue sizes of the chosen LF. */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
1616 
1617 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1618 
1619 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1620 {
1621 	struct nix_hw *nix_hw;
1622 
1623 	if (!is_block_implemented(rvu->hw, blkaddr))
1624 		return;
1625 
1626 	if (blkaddr == BLKADDR_NIX0) {
1627 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1628 		nix_hw = &rvu->hw->nix[0];
1629 	} else {
1630 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1631 						      rvu->rvu_dbg.root);
1632 		nix_hw = &rvu->hw->nix[1];
1633 	}
1634 
1635 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1636 			    &rvu_dbg_nix_sq_ctx_fops);
1637 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1638 			    &rvu_dbg_nix_rq_ctx_fops);
1639 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1640 			    &rvu_dbg_nix_cq_ctx_fops);
1641 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1642 			    &rvu_dbg_nix_ndc_tx_cache_fops);
1643 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1644 			    &rvu_dbg_nix_ndc_rx_cache_fops);
1645 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1646 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1647 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1648 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1649 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1650 			    &rvu_dbg_nix_qsize_fops);
1651 }
1652 
/* Create the "npa" debugfs directory and its qsize/context/NDC files.
 * All files take rvu as private data.
 */
static void rvu_dbg_npa_init(struct rvu *rvu)
{
	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);

	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_qsize_fops);
	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_aura_ctx_fops);
	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_pool_ctx_fops);
	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_cache_fops);
	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_hits_miss_fops);
}
1668 
/* Read one cumulative NIX RX stat for the current cgxd/lmac_id and print
 * it if the read succeeded; evaluates to the counter value.
 * NOTE(review): relies on 's', 'rvu', 'cgxd', 'lmac_id' and 'err' being
 * in scope at the expansion site, and updates 'err' as a side effect.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})

/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS; same scope requirements
 * and 'err' side effect.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1688 
1689 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1690 {
1691 	struct cgx_link_user_info linfo;
1692 	struct mac_ops *mac_ops;
1693 	void *cgxd = s->private;
1694 	u64 ucast, mcast, bcast;
1695 	int stat = 0, err = 0;
1696 	u64 tx_stat, rx_stat;
1697 	struct rvu *rvu;
1698 
1699 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1700 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1701 	if (!rvu)
1702 		return -ENODEV;
1703 
1704 	mac_ops = get_mac_ops(cgxd);
1705 
1706 	if (!mac_ops)
1707 		return 0;
1708 
1709 	/* Link status */
1710 	seq_puts(s, "\n=======Link Status======\n\n");
1711 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1712 	if (err)
1713 		seq_puts(s, "Failed to read link status\n");
1714 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
1715 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
1716 
1717 	/* Rx stats */
1718 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1719 		   mac_ops->name);
1720 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1721 	if (err)
1722 		return err;
1723 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1724 	if (err)
1725 		return err;
1726 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1727 	if (err)
1728 		return err;
1729 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1730 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1731 	if (err)
1732 		return err;
1733 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1734 	if (err)
1735 		return err;
1736 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1737 	if (err)
1738 		return err;
1739 
1740 	/* Tx stats */
1741 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
1742 		   mac_ops->name);
1743 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1744 	if (err)
1745 		return err;
1746 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1747 	if (err)
1748 		return err;
1749 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1750 	if (err)
1751 		return err;
1752 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1753 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1754 	if (err)
1755 		return err;
1756 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1757 	if (err)
1758 		return err;
1759 
1760 	/* Rx stats */
1761 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
1762 	while (stat < mac_ops->rx_stats_cnt) {
1763 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1764 		if (err)
1765 			return err;
1766 		if (is_rvu_otx2(rvu))
1767 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
1768 				   rx_stat);
1769 		else
1770 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
1771 				   rx_stat);
1772 		stat++;
1773 	}
1774 
1775 	/* Tx stats */
1776 	stat = 0;
1777 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
1778 	while (stat < mac_ops->tx_stats_cnt) {
1779 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1780 		if (err)
1781 			return err;
1782 
1783 	if (is_rvu_otx2(rvu))
1784 		seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
1785 			   tx_stat);
1786 	else
1787 		seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
1788 			   tx_stat);
1789 	stat++;
1790 	}
1791 
1792 	return err;
1793 }
1794 
1795 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1796 {
1797 	struct dentry *current_dir;
1798 	int err, lmac_id;
1799 	char *buf;
1800 
1801 	current_dir = filp->file->f_path.dentry->d_parent;
1802 	buf = strrchr(current_dir->d_name.name, 'c');
1803 	if (!buf)
1804 		return -EINVAL;
1805 
1806 	err = kstrtoint(buf + 1, 10, &lmac_id);
1807 	if (!err) {
1808 		err = cgx_print_stats(filp, lmac_id);
1809 		if (err)
1810 			return err;
1811 	}
1812 	return err;
1813 }
1814 
1815 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1816 
1817 static void rvu_dbg_cgx_init(struct rvu *rvu)
1818 {
1819 	struct mac_ops *mac_ops;
1820 	unsigned long lmac_bmap;
1821 	int rvu_def_cgx_id = 0;
1822 	int i, lmac_id;
1823 	char dname[20];
1824 	void *cgx;
1825 
1826 	if (!cgx_get_cgxcnt_max())
1827 		return;
1828 
1829 	mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
1830 	if (!mac_ops)
1831 		return;
1832 
1833 	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
1834 						   rvu->rvu_dbg.root);
1835 
1836 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
1837 		cgx = rvu_cgx_pdata(i, rvu);
1838 		if (!cgx)
1839 			continue;
1840 		lmac_bmap = cgx_get_lmac_bmap(cgx);
1841 		/* cgx debugfs dir */
1842 		sprintf(dname, "%s%d", mac_ops->name, i);
1843 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
1844 						      rvu->rvu_dbg.cgx_root);
1845 
1846 		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
1847 			/* lmac debugfs dir */
1848 			sprintf(dname, "lmac%d", lmac_id);
1849 			rvu->rvu_dbg.lmac =
1850 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
1851 
1852 			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
1853 					    cgx, &rvu_dbg_cgx_stat_fops);
1854 		}
1855 	}
1856 }
1857 
1858 /* NPC debugfs APIs */
1859 static void rvu_print_npc_mcam_info(struct seq_file *s,
1860 				    u16 pcifunc, int blkaddr)
1861 {
1862 	struct rvu *rvu = s->private;
1863 	int entry_acnt, entry_ecnt;
1864 	int cntr_acnt, cntr_ecnt;
1865 
1866 	/* Skip PF0 */
1867 	if (!pcifunc)
1868 		return;
1869 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
1870 					  &entry_acnt, &entry_ecnt);
1871 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
1872 					    &cntr_acnt, &cntr_ecnt);
1873 	if (!entry_acnt && !cntr_acnt)
1874 		return;
1875 
1876 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1877 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
1878 			   rvu_get_pf(pcifunc));
1879 	else
1880 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
1881 			   rvu_get_pf(pcifunc),
1882 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1883 
1884 	if (entry_acnt) {
1885 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
1886 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
1887 	}
1888 	if (cntr_acnt) {
1889 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
1890 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
1891 	}
1892 }
1893 
1894 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
1895 {
1896 	struct rvu *rvu = filp->private;
1897 	int pf, vf, numvfs, blkaddr;
1898 	struct npc_mcam *mcam;
1899 	u16 pcifunc, counters;
1900 	u64 cfg;
1901 
1902 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1903 	if (blkaddr < 0)
1904 		return -ENODEV;
1905 
1906 	mcam = &rvu->hw->mcam;
1907 	counters = rvu->hw->npc_counters;
1908 
1909 	seq_puts(filp, "\nNPC MCAM info:\n");
1910 	/* MCAM keywidth on receive and transmit sides */
1911 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
1912 	cfg = (cfg >> 32) & 0x07;
1913 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
1914 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
1915 		   "224bits" : "448bits"));
1916 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
1917 	cfg = (cfg >> 32) & 0x07;
1918 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
1919 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
1920 		   "224bits" : "448bits"));
1921 
1922 	mutex_lock(&mcam->lock);
1923 	/* MCAM entries */
1924 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
1925 	seq_printf(filp, "\t\t Reserved \t: %d\n",
1926 		   mcam->total_entries - mcam->bmap_entries);
1927 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
1928 
1929 	/* MCAM counters */
1930 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
1931 	seq_printf(filp, "\t\t Reserved \t: %d\n",
1932 		   counters - mcam->counters.max);
1933 	seq_printf(filp, "\t\t Available \t: %d\n",
1934 		   rvu_rsrc_free_count(&mcam->counters));
1935 
1936 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
1937 		mutex_unlock(&mcam->lock);
1938 		return 0;
1939 	}
1940 
1941 	seq_puts(filp, "\n\t\t Current allocation\n");
1942 	seq_puts(filp, "\t\t====================\n");
1943 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
1944 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1945 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
1946 
1947 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1948 		numvfs = (cfg >> 12) & 0xFF;
1949 		for (vf = 0; vf < numvfs; vf++) {
1950 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
1951 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
1952 		}
1953 	}
1954 
1955 	mutex_unlock(&mcam->lock);
1956 	return 0;
1957 }
1958 
1959 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
1960 
1961 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
1962 					     void *unused)
1963 {
1964 	struct rvu *rvu = filp->private;
1965 	struct npc_mcam *mcam;
1966 	int blkaddr;
1967 
1968 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1969 	if (blkaddr < 0)
1970 		return -ENODEV;
1971 
1972 	mcam = &rvu->hw->mcam;
1973 
1974 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
1975 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
1976 		   rvu_read64(rvu, blkaddr,
1977 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
1978 
1979 	return 0;
1980 }
1981 
1982 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
1983 
/* Print the match fields (packet value and mask) an installed MCAM
 * rule keys on. Each set bit in rule->features selects one NPC field;
 * network-order fields are byte-swapped (ntohs) for display.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	/* rule->features is treated as a 64-bit field bitmap */
	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		/* L4 source ports share one layout across TCP/UDP/SCTP */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* field has no printable value; terminate the line */
			seq_puts(s, "\n");
			break;
		}
	}
}
2047 
/* Print a human-readable description of an MCAM rule's action,
 * decoding either the TX or RX action op depending on the rule's
 * interface. Unknown ops print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (rule->intf == NIX_INTF_TX) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
2095 
/* Map a NIX interface id to a printable name ("unknown" otherwise). */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2113 
/* Dump every software-tracked MCAM rule: installing PF/VF, direction,
 * interface, entry index, matched fields, forwarding target (RX),
 * action, enable state, and hit counter if one is attached.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	/* Hold the MCAM lock while walking the rule list */
	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		/* owner pcifunc: PF in the upper bits, VF id + 1 in FUNC */
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules also carry a forwarding target pcifunc */
		if (iter->intf == NIX_INTF_RX) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
		}

		rvu_dbg_npc_mcam_show_action(s, iter);
		seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		/* Read the hardware hit counter for this rule */
		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2176 
2177 static void rvu_dbg_npc_init(struct rvu *rvu)
2178 {
2179 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2180 
2181 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2182 			    &rvu_dbg_npc_mcam_info_fops);
2183 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2184 			    &rvu_dbg_npc_mcam_rules_fops);
2185 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2186 			    &rvu_dbg_npc_rx_miss_act_fops);
2187 }
2188 
2189 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2190 {
2191 	struct cpt_ctx *ctx = filp->private;
2192 	u64 busy_sts = 0, free_sts = 0;
2193 	u32 e_min = 0, e_max = 0, e, i;
2194 	u16 max_ses, max_ies, max_aes;
2195 	struct rvu *rvu = ctx->rvu;
2196 	int blkaddr = ctx->blkaddr;
2197 	u64 reg;
2198 
2199 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2200 	max_ses = reg & 0xffff;
2201 	max_ies = (reg >> 16) & 0xffff;
2202 	max_aes = (reg >> 32) & 0xffff;
2203 
2204 	switch (eng_type) {
2205 	case CPT_AE_TYPE:
2206 		e_min = max_ses + max_ies;
2207 		e_max = max_ses + max_ies + max_aes;
2208 		break;
2209 	case CPT_SE_TYPE:
2210 		e_min = 0;
2211 		e_max = max_ses;
2212 		break;
2213 	case CPT_IE_TYPE:
2214 		e_min = max_ses;
2215 		e_max = max_ses + max_ies;
2216 		break;
2217 	default:
2218 		return -EINVAL;
2219 	}
2220 
2221 	for (e = e_min, i = 0; e < e_max; e++, i++) {
2222 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2223 		if (reg & 0x1)
2224 			busy_sts |= 1ULL << i;
2225 
2226 		if (reg & 0x2)
2227 			free_sts |= 1ULL << i;
2228 	}
2229 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2230 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
2231 
2232 	return 0;
2233 }
2234 
/* Seq-file show handler for AE-type CPT engine status. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2241 
/* Seq-file show handler for SE-type CPT engine status. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2248 
/* Seq-file show handler for IE-type CPT engine status. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2255 
2256 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2257 {
2258 	struct cpt_ctx *ctx = filp->private;
2259 	u16 max_ses, max_ies, max_aes;
2260 	struct rvu *rvu = ctx->rvu;
2261 	int blkaddr = ctx->blkaddr;
2262 	u32 e_max, e;
2263 	u64 reg;
2264 
2265 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2266 	max_ses = reg & 0xffff;
2267 	max_ies = (reg >> 16) & 0xffff;
2268 	max_aes = (reg >> 32) & 0xffff;
2269 
2270 	e_max = max_ses + max_ies + max_aes;
2271 
2272 	seq_puts(filp, "===========================================\n");
2273 	for (e = 0; e < e_max; e++) {
2274 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2275 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2276 			   reg & 0xff);
2277 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2278 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2279 			   reg);
2280 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2281 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2282 			   reg);
2283 		seq_puts(filp, "===========================================\n");
2284 	}
2285 	return 0;
2286 }
2287 
2288 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2289 
2290 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2291 {
2292 	struct cpt_ctx *ctx = filp->private;
2293 	int blkaddr = ctx->blkaddr;
2294 	struct rvu *rvu = ctx->rvu;
2295 	struct rvu_block *block;
2296 	struct rvu_hwinfo *hw;
2297 	u64 reg;
2298 	u32 lf;
2299 
2300 	hw = rvu->hw;
2301 	block = &hw->block[blkaddr];
2302 	if (!block->lf.bmap)
2303 		return -ENODEV;
2304 
2305 	seq_puts(filp, "===========================================\n");
2306 	for (lf = 0; lf < block->lf.max; lf++) {
2307 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2308 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
2309 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2310 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
2311 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2312 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
2313 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2314 				(lf << block->lfshift));
2315 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
2316 		seq_puts(filp, "===========================================\n");
2317 	}
2318 	return 0;
2319 }
2320 
2321 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2322 
2323 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2324 {
2325 	struct cpt_ctx *ctx = filp->private;
2326 	struct rvu *rvu = ctx->rvu;
2327 	int blkaddr = ctx->blkaddr;
2328 	u64 reg0, reg1;
2329 
2330 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2331 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2332 	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
2333 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2334 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2335 	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
2336 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2337 	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
2338 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2339 	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
2340 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2341 	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
2342 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2343 	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
2344 
2345 	return 0;
2346 }
2347 
2348 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2349 
2350 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2351 {
2352 	struct cpt_ctx *ctx = filp->private;
2353 	struct rvu *rvu = ctx->rvu;
2354 	int blkaddr = ctx->blkaddr;
2355 	u64 reg;
2356 
2357 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2358 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2359 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2360 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2361 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2362 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2363 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2364 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2365 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2366 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2367 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2368 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2369 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2370 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2371 
2372 	return 0;
2373 }
2374 
2375 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2376 
2377 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2378 {
2379 	struct cpt_ctx *ctx;
2380 
2381 	if (!is_block_implemented(rvu->hw, blkaddr))
2382 		return;
2383 
2384 	if (blkaddr == BLKADDR_CPT0) {
2385 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2386 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
2387 		ctx->blkaddr = BLKADDR_CPT0;
2388 		ctx->rvu = rvu;
2389 	} else {
2390 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2391 						      rvu->rvu_dbg.root);
2392 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
2393 		ctx->blkaddr = BLKADDR_CPT1;
2394 		ctx->rvu = rvu;
2395 	}
2396 
2397 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2398 			    &rvu_dbg_cpt_pc_fops);
2399 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2400 			    &rvu_dbg_cpt_ae_sts_fops);
2401 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2402 			    &rvu_dbg_cpt_se_sts_fops);
2403 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2404 			    &rvu_dbg_cpt_ie_sts_fops);
2405 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2406 			    &rvu_dbg_cpt_engines_info_fops);
2407 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2408 			    &rvu_dbg_cpt_lfs_info_fops);
2409 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2410 			    &rvu_dbg_cpt_err_info_fops);
2411 }
2412 
/* Debugfs root directory name: silicon family decides it. */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
2420 
2421 void rvu_dbg_init(struct rvu *rvu)
2422 {
2423 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2424 
2425 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2426 			    &rvu_dbg_rsrc_status_fops);
2427 
2428 	if (!cgx_get_cgxcnt_max())
2429 		goto create;
2430 
2431 	if (is_rvu_otx2(rvu))
2432 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2433 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2434 	else
2435 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2436 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2437 
2438 create:
2439 	rvu_dbg_npa_init(rvu);
2440 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2441 
2442 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2443 	rvu_dbg_cgx_init(rvu);
2444 	rvu_dbg_npc_init(rvu);
2445 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2446 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2447 }
2448 
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2453 
2454 #endif /* CONFIG_DEBUG_FS */
2455