1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #ifdef CONFIG_DEBUG_FS
12 
13 #include <linux/fs.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 
18 #include "rvu_struct.h"
19 #include "rvu_reg.h"
20 #include "rvu.h"
21 #include "cgx.h"
22 #include "lmac_common.h"
23 #include "npc.h"
24 
25 #define DEBUGFS_DIR_NAME "octeontx2"
26 
/* Generic CGX statistics counter indexes. The same enumerators index both
 * the RX and TX name tables below (cgx_rx_stats_fields[] uses
 * CGX_STAT0..CGX_STAT12, cgx_tx_stats_fields[] uses CGX_STAT0..CGX_STAT17).
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
48 
/* NIX TX stats: per-LF transmit statistics register indexes */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,	/* count of TX stats, keep last */
};
58 
/* NIX RX stats: per-LF receive statistics register indexes */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,	/* count of RX stats, keep last */
};
75 
/* Human-readable names for CGX RX counters, indexed by CGX_STATn */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
91 
92 static char *cgx_tx_stats_fields[] = {
93 	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
94 	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
95 	[CGX_STAT2]	= "Multiple collisions before successful transmission",
96 	[CGX_STAT3]	= "Single collisions before successful transmission",
97 	[CGX_STAT4]	= "Total octets sent on the interface",
98 	[CGX_STAT5]	= "Total frames sent on the interface",
99 	[CGX_STAT6]	= "Packets sent with an octet count < 64",
100 	[CGX_STAT7]	= "Packets sent with an octet count == 64",
101 	[CGX_STAT8]	= "Packets sent with an octet count of 65–127",
102 	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
103 	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
104 	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
105 	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
106 	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
107 	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
108 	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
109 	[CGX_STAT16]	= "Transmit underflow and were truncated",
110 	[CGX_STAT17]	= "Control/PAUSE packets sent",
111 };
112 
/* Human-readable names for RPM (CN10K MAC) RX counters, in register order.
 * Fixes: "with out error" -> "without error", garbled "a1nrange" ->
 * "in-range", and a Unicode en dash in "65-127" normalized to an ASCII
 * hyphen to match the sibling range strings.
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
158 
/* Human-readable names for RPM (CN10K MAC) TX counters, in register order.
 * Fixes: Unicode en dash in "65-127" normalized to an ASCII hyphen, and a
 * trailing comma added to the last initializer per kernel convention.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface",
};
195 
/* CPT engine types; values match the hardware's engine-type encoding */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
201 
/* Number of banks in an NDC block, read from its NDC_AF_CONST register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Placeholders so "NULL" can be passed as read_op/write_op to the FOPS
 * macros below; token pasting turns it into rvu_dbg_NULL / rvu_dbg_open_NULL.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Define a seq_file based debugfs file: generates an open handler that
 * binds rvu_dbg_##read_op as the seq show callback, plus the fops struct.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Define a raw (non seq_file) debugfs file using simple_open */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

/* Defined later in this file; needed by rvu_dbg_qsize_display() */
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
231 
232 /* Dumps current provisioning status of all RVU block LFs */
233 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
234 					  char __user *buffer,
235 					  size_t count, loff_t *ppos)
236 {
237 	int index, off = 0, flag = 0, go_back = 0, len = 0;
238 	struct rvu *rvu = filp->private_data;
239 	int lf, pf, vf, pcifunc;
240 	struct rvu_block block;
241 	int bytes_not_copied;
242 	int lf_str_size = 12;
243 	int buf_size = 2048;
244 	char *lfs;
245 	char *buf;
246 
247 	/* don't allow partial reads */
248 	if (*ppos != 0)
249 		return 0;
250 
251 	buf = kzalloc(buf_size, GFP_KERNEL);
252 	if (!buf)
253 		return -ENOSPC;
254 
255 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
256 	if (!lfs) {
257 		kfree(buf);
258 		return -ENOMEM;
259 	}
260 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
261 			  "pcifunc");
262 	for (index = 0; index < BLK_COUNT; index++)
263 		if (strlen(rvu->hw->block[index].name)) {
264 			off += scnprintf(&buf[off], buf_size - 1 - off,
265 					 "%-*s", lf_str_size,
266 					 rvu->hw->block[index].name);
267 		}
268 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
269 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
270 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
271 			pcifunc = pf << 10 | vf;
272 			if (!pcifunc)
273 				continue;
274 
275 			if (vf) {
276 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
277 				go_back = scnprintf(&buf[off],
278 						    buf_size - 1 - off,
279 						    "%-*s", lf_str_size, lfs);
280 			} else {
281 				sprintf(lfs, "PF%d", pf);
282 				go_back = scnprintf(&buf[off],
283 						    buf_size - 1 - off,
284 						    "%-*s", lf_str_size, lfs);
285 			}
286 
287 			off += go_back;
288 			for (index = 0; index < BLKTYPE_MAX; index++) {
289 				block = rvu->hw->block[index];
290 				if (!strlen(block.name))
291 					continue;
292 				len = 0;
293 				lfs[len] = '\0';
294 				for (lf = 0; lf < block.lf.max; lf++) {
295 					if (block.fn_map[lf] != pcifunc)
296 						continue;
297 					flag = 1;
298 					len += sprintf(&lfs[len], "%d,", lf);
299 				}
300 
301 				if (flag)
302 					len--;
303 				lfs[len] = '\0';
304 				off += scnprintf(&buf[off], buf_size - 1 - off,
305 						 "%-*s", lf_str_size, lfs);
306 				if (!strlen(lfs))
307 					go_back += lf_str_size;
308 			}
309 			if (!flag)
310 				off -= go_back;
311 			else
312 				flag = 0;
313 			off--;
314 			off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
315 		}
316 	}
317 
318 	bytes_not_copied = copy_to_user(buffer, buf, off);
319 	kfree(lfs);
320 	kfree(buf);
321 
322 	if (bytes_not_copied)
323 		return -EFAULT;
324 
325 	*ppos = off;
326 	return off;
327 }
328 
329 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
330 
331 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
332 {
333 	struct rvu *rvu = filp->private;
334 	struct pci_dev *pdev = NULL;
335 	struct mac_ops *mac_ops;
336 	char cgx[10], lmac[10];
337 	struct rvu_pfvf *pfvf;
338 	int pf, domain, blkid;
339 	u8 cgx_id, lmac_id;
340 	u16 pcifunc;
341 
342 	domain = 2;
343 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
344 	/* There can be no CGX devices at all */
345 	if (!mac_ops)
346 		return 0;
347 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
348 		   mac_ops->name);
349 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
350 		if (!is_pf_cgxmapped(rvu, pf))
351 			continue;
352 
353 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
354 		if (!pdev)
355 			continue;
356 
357 		cgx[0] = 0;
358 		lmac[0] = 0;
359 		pcifunc = pf << 10;
360 		pfvf = rvu_get_pfvf(rvu, pcifunc);
361 
362 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
363 			blkid = 0;
364 		else
365 			blkid = 1;
366 
367 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
368 				    &lmac_id);
369 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
370 		sprintf(lmac, "LMAC%d", lmac_id);
371 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
372 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
373 	}
374 	return 0;
375 }
376 
377 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
378 
379 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
380 				u16 *pcifunc)
381 {
382 	struct rvu_block *block;
383 	struct rvu_hwinfo *hw;
384 
385 	hw = rvu->hw;
386 	block = &hw->block[blkaddr];
387 
388 	if (lf < 0 || lf >= block->lf.max) {
389 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
390 			 block->lf.max - 1);
391 		return false;
392 	}
393 
394 	*pcifunc = block->fn_map[lf];
395 	if (!*pcifunc) {
396 		dev_warn(rvu->dev,
397 			 "This LF is not attached to any RVU PFFUNC\n");
398 		return false;
399 	}
400 	return true;
401 }
402 
403 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
404 {
405 	char *buf;
406 
407 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
408 	if (!buf)
409 		return;
410 
411 	if (!pfvf->aura_ctx) {
412 		seq_puts(m, "Aura context is not initialized\n");
413 	} else {
414 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
415 					pfvf->aura_ctx->qsize);
416 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
417 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
418 	}
419 
420 	if (!pfvf->pool_ctx) {
421 		seq_puts(m, "Pool context is not initialized\n");
422 	} else {
423 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
424 					pfvf->pool_ctx->qsize);
425 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
426 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
427 	}
428 	kfree(buf);
429 }
430 
431 /* The 'qsize' entry dumps current Aura/Pool context Qsize
432  * and each context's current enable/disable status in a bitmap.
433  */
434 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
435 				 int blktype)
436 {
437 	void (*print_qsize)(struct seq_file *filp,
438 			    struct rvu_pfvf *pfvf) = NULL;
439 	struct dentry *current_dir;
440 	struct rvu_pfvf *pfvf;
441 	struct rvu *rvu;
442 	int qsize_id;
443 	u16 pcifunc;
444 	int blkaddr;
445 
446 	rvu = filp->private;
447 	switch (blktype) {
448 	case BLKTYPE_NPA:
449 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
450 		print_qsize = print_npa_qsize;
451 		break;
452 
453 	case BLKTYPE_NIX:
454 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
455 		print_qsize = print_nix_qsize;
456 		break;
457 
458 	default:
459 		return -EINVAL;
460 	}
461 
462 	if (blktype == BLKTYPE_NPA) {
463 		blkaddr = BLKADDR_NPA;
464 	} else {
465 		current_dir = filp->file->f_path.dentry->d_parent;
466 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
467 				   BLKADDR_NIX1 : BLKADDR_NIX0);
468 	}
469 
470 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
471 		return -EINVAL;
472 
473 	pfvf = rvu_get_pfvf(rvu, pcifunc);
474 	print_qsize(filp, pfvf);
475 
476 	return 0;
477 }
478 
479 static ssize_t rvu_dbg_qsize_write(struct file *filp,
480 				   const char __user *buffer, size_t count,
481 				   loff_t *ppos, int blktype)
482 {
483 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
484 	struct seq_file *seqfile = filp->private_data;
485 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
486 	struct rvu *rvu = seqfile->private;
487 	struct dentry *current_dir;
488 	int blkaddr;
489 	u16 pcifunc;
490 	int ret, lf;
491 
492 	cmd_buf = memdup_user(buffer, count + 1);
493 	if (IS_ERR(cmd_buf))
494 		return -ENOMEM;
495 
496 	cmd_buf[count] = '\0';
497 
498 	cmd_buf_tmp = strchr(cmd_buf, '\n');
499 	if (cmd_buf_tmp) {
500 		*cmd_buf_tmp = '\0';
501 		count = cmd_buf_tmp - cmd_buf + 1;
502 	}
503 
504 	cmd_buf_tmp = cmd_buf;
505 	subtoken = strsep(&cmd_buf, " ");
506 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
507 	if (cmd_buf)
508 		ret = -EINVAL;
509 
510 	if (!strncmp(subtoken, "help", 4) || ret < 0) {
511 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
512 		goto qsize_write_done;
513 	}
514 
515 	if (blktype == BLKTYPE_NPA) {
516 		blkaddr = BLKADDR_NPA;
517 	} else {
518 		current_dir = filp->f_path.dentry->d_parent;
519 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
520 				   BLKADDR_NIX1 : BLKADDR_NIX0);
521 	}
522 
523 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
524 		ret = -EINVAL;
525 		goto qsize_write_done;
526 	}
527 	if (blktype  == BLKTYPE_NPA)
528 		rvu->rvu_dbg.npa_qsize_id = lf;
529 	else
530 		rvu->rvu_dbg.nix_qsize_id = lf;
531 
532 qsize_write_done:
533 	kfree(cmd_buf_tmp);
534 	return ret ? ret : count;
535 }
536 
537 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
538 				       const char __user *buffer,
539 				       size_t count, loff_t *ppos)
540 {
541 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
542 					    BLKTYPE_NPA);
543 }
544 
545 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
546 {
547 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
548 }
549 
550 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
551 
/* Dumps given NPA Aura's context (from an admin-queue READ response).
 * Each "Wn:" prefix names the 64-bit context word the field belongs to;
 * the output order mirrors the hardware context layout.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is only printed on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is only printed on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
599 
/* Dumps given NPA Pool's context (from an admin-queue READ response).
 * Each "Wn:" prefix names the 64-bit context word the field belongs to;
 * the output order mirrors the hardware context layout.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is only printed on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is only printed on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
646 
/* Reads aura/pool's ctx from admin queue and dumps it to debugfs.
 * The LF/id/"all" selection was cached earlier by the matching
 * rvu_dbg_npa_ctx_write() handler. With "all", every context from 0 to
 * qsize-1 is read and printed; otherwise only the selected id.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the cached selection for the requested context type */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ instruction for this context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" iterates the full range; otherwise just [id, id] */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
727 
728 static int write_npa_ctx(struct rvu *rvu, bool all,
729 			 int npalf, int id, int ctype)
730 {
731 	struct rvu_pfvf *pfvf;
732 	int max_id = 0;
733 	u16 pcifunc;
734 
735 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
736 		return -EINVAL;
737 
738 	pfvf = rvu_get_pfvf(rvu, pcifunc);
739 
740 	if (ctype == NPA_AQ_CTYPE_AURA) {
741 		if (!pfvf->aura_ctx) {
742 			dev_warn(rvu->dev, "Aura context is not initialized\n");
743 			return -EINVAL;
744 		}
745 		max_id = pfvf->aura_ctx->qsize;
746 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
747 		if (!pfvf->pool_ctx) {
748 			dev_warn(rvu->dev, "Pool context is not initialized\n");
749 			return -EINVAL;
750 		}
751 		max_id = pfvf->pool_ctx->qsize;
752 	}
753 
754 	if (id < 0 || id >= max_id) {
755 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
756 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
757 			max_id - 1);
758 		return -EINVAL;
759 	}
760 
761 	switch (ctype) {
762 	case NPA_AQ_CTYPE_AURA:
763 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
764 		rvu->rvu_dbg.npa_aura_ctx.id = id;
765 		rvu->rvu_dbg.npa_aura_ctx.all = all;
766 		break;
767 
768 	case NPA_AQ_CTYPE_POOL:
769 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
770 		rvu->rvu_dbg.npa_pool_ctx.id = id;
771 		rvu->rvu_dbg.npa_pool_ctx.all = all;
772 		break;
773 	default:
774 		return -EINVAL;
775 	}
776 	return 0;
777 }
778 
/* Parse "<npalf> [<id>|all]" written from userspace into its components.
 * @cmd_buf must have room for *count + 1 bytes (the caller allocates
 * count + 1). *count is trimmed to the first newline, if any.
 * Returns 0 on success, a negative errno on malformed input.
 * Note: strsep() advances only this function's copy of cmd_buf; the
 * caller's pointer (later passed to kfree()) is unaffected.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Optional second token: a context id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* Any further tokens make the input invalid */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
816 
817 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
818 				     const char __user *buffer,
819 				     size_t count, loff_t *ppos, int ctype)
820 {
821 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
822 					"aura" : "pool";
823 	struct seq_file *seqfp = filp->private_data;
824 	struct rvu *rvu = seqfp->private;
825 	int npalf, id = 0, ret;
826 	bool all = false;
827 
828 	if ((*ppos != 0) || !count)
829 		return -EINVAL;
830 
831 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
832 	if (!cmd_buf)
833 		return count;
834 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
835 				   &npalf, &id, &all);
836 	if (ret < 0) {
837 		dev_info(rvu->dev,
838 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
839 			 ctype_string, ctype_string);
840 		goto done;
841 	} else {
842 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
843 	}
844 done:
845 	kfree(cmd_buf);
846 	return ret ? ret : count;
847 }
848 
849 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
850 					  const char __user *buffer,
851 					  size_t count, loff_t *ppos)
852 {
853 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
854 				     NPA_AQ_CTYPE_AURA);
855 }
856 
857 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
858 {
859 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
860 }
861 
862 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
863 
864 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
865 					  const char __user *buffer,
866 					  size_t count, loff_t *ppos)
867 {
868 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
869 				     NPA_AQ_CTYPE_POOL);
870 }
871 
872 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
873 {
874 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
875 }
876 
877 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
878 
879 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
880 			    int ctype, int transaction)
881 {
882 	u64 req, out_req, lat, cant_alloc;
883 	struct nix_hw *nix_hw;
884 	struct rvu *rvu;
885 	int port;
886 
887 	if (blk_addr == BLKADDR_NDC_NPA0) {
888 		rvu = s->private;
889 	} else {
890 		nix_hw = s->private;
891 		rvu = nix_hw->rvu;
892 	}
893 
894 	for (port = 0; port < NDC_MAX_PORT; port++) {
895 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
896 						(port, ctype, transaction));
897 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
898 						(port, ctype, transaction));
899 		out_req = rvu_read64(rvu, blk_addr,
900 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
901 				     (port, ctype, transaction));
902 		cant_alloc = rvu_read64(rvu, blk_addr,
903 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
904 					(port, transaction));
905 		seq_printf(s, "\nPort:%d\n", port);
906 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
907 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
908 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
909 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
910 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
911 	}
912 }
913 
914 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
915 {
916 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
917 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
918 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
919 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
920 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
921 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
922 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
923 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
924 	return 0;
925 }
926 
927 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
928 {
929 	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
930 }
931 
932 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
933 
934 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
935 {
936 	struct nix_hw *nix_hw;
937 	struct rvu *rvu;
938 	int bank, max_bank;
939 
940 	if (blk_addr == BLKADDR_NDC_NPA0) {
941 		rvu = s->private;
942 	} else {
943 		nix_hw = s->private;
944 		rvu = nix_hw->rvu;
945 	}
946 
947 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
948 	for (bank = 0; bank < max_bank; bank++) {
949 		seq_printf(s, "BANK:%d\n", bank);
950 		seq_printf(s, "\tHits:\t%lld\n",
951 			   (u64)rvu_read64(rvu, blk_addr,
952 			   NDC_AF_BANKX_HIT_PC(bank)));
953 		seq_printf(s, "\tMiss:\t%lld\n",
954 			   (u64)rvu_read64(rvu, blk_addr,
955 			    NDC_AF_BANKX_MISS_PC(bank)));
956 	}
957 	return 0;
958 }
959 
960 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
961 {
962 	struct nix_hw *nix_hw = filp->private;
963 	int blkaddr = 0;
964 	int ndc_idx = 0;
965 
966 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
967 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
968 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
969 
970 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
971 }
972 
973 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
974 
975 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
976 {
977 	struct nix_hw *nix_hw = filp->private;
978 	int blkaddr = 0;
979 	int ndc_idx = 0;
980 
981 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
982 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
983 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
984 
985 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
986 }
987 
988 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
989 
990 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
991 					     void *unused)
992 {
993 	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
994 }
995 
996 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
997 
998 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
999 						void *unused)
1000 {
1001 	struct nix_hw *nix_hw = filp->private;
1002 	int ndc_idx = NPA0_U;
1003 	int blkaddr = 0;
1004 
1005 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1006 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1007 
1008 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1009 }
1010 
1011 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1012 
1013 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1014 						void *unused)
1015 {
1016 	struct nix_hw *nix_hw = filp->private;
1017 	int ndc_idx = NPA0_U;
1018 	int blkaddr = 0;
1019 
1020 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1021 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1022 
1023 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1024 }
1025 
1026 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1027 
1028 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1029 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1030 {
1031 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1032 		   sq_ctx->ena, sq_ctx->qint_idx);
1033 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1034 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1035 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1036 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1037 
1038 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1039 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1040 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1041 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1042 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1043 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1044 
1045 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1046 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1047 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1048 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1049 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1050 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1051 
1052 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1053 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1054 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1055 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1056 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1057 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1058 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1059 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1060 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1061 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1062 
1063 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1064 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1065 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1066 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1067 		   sq_ctx->smenq_next_sqb);
1068 
1069 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1070 
1071 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1072 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1073 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1074 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1075 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1076 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1077 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1078 
1079 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1080 		   (u64)sq_ctx->scm_lso_rem);
1081 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1082 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1083 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1084 		   (u64)sq_ctx->dropped_octs);
1085 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1086 		   (u64)sq_ctx->dropped_pkts);
1087 }
1088 
/* Dumps the given nix_sq's context word by word (W0..W15).
 * CN10K silicon uses a different context layout and is routed to
 * print_nix_cn10k_sq_ctx() instead.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OcteonTx2 (CN10K) chips have their own SQ context format */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1158 
1159 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1160 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1161 {
1162 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1163 		   rq_ctx->ena, rq_ctx->sso_ena);
1164 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1165 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1166 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1167 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1168 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1169 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1170 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1171 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1172 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1173 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1174 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1175 
1176 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1177 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1178 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1179 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1180 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1181 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1182 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1183 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1184 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1185 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1186 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1187 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1188 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1189 
1190 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1191 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1192 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1193 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1194 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1195 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1196 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1197 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1198 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1199 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1200 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1201 
1202 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1203 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1204 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1205 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1206 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1207 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1208 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1209 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1210 
1211 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1212 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1213 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1214 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1215 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1216 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1217 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1218 
1219 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1220 		   rq_ctx->ltag, rq_ctx->good_utag);
1221 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1222 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1223 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1224 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1225 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1226 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1227 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1228 
1229 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1230 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1231 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1232 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1233 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1234 }
1235 
/* Dumps the given nix_rq's context word by word (W0..W10).
 * CN10K silicon uses a different context layout and is routed to
 * print_nix_cn10k_rq_ctx() instead.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OcteonTx2 (CN10K) chips have their own RQ context format */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1304 
1305 /* Dumps given nix_cq's context */
1306 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1307 {
1308 	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1309 
1310 	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1311 
1312 	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1313 	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1314 		   cq_ctx->avg_con, cq_ctx->cint_idx);
1315 	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1316 		   cq_ctx->cq_err, cq_ctx->qint_idx);
1317 	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1318 		   cq_ctx->bpid, cq_ctx->bp_ena);
1319 
1320 	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1321 		   cq_ctx->update_time, cq_ctx->avg_level);
1322 	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1323 		   cq_ctx->head, cq_ctx->tail);
1324 
1325 	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1326 		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1327 	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1328 		   cq_ctx->qsize, cq_ctx->caching);
1329 	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1330 		   cq_ctx->substream, cq_ctx->ena);
1331 	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1332 		   cq_ctx->drop_ena, cq_ctx->drop);
1333 	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1334 }
1335 
/* Read handler shared by the sq_ctx/rq_ctx/cq_ctx debugfs files.
 *
 * Dumps the queue context(s) previously selected through the matching
 * write handler (cached in rvu->rvu_dbg.nix_*_ctx) by issuing an admin
 * queue READ for each selected queue index and pretty-printing the
 * response.  Returns 0 on success, -EINVAL otherwise.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the selection cached by the last write to this file */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-type queue count and dump routine */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps every context in [0, qsize); otherwise just 'id' */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1424 
/* Validate a parsed "<nixlf> <qid>|all" selection and cache it in
 * rvu->rvu_dbg so the matching *_ctx read handler knows what to dump.
 * Returns 0 on success, -EINVAL for a bad LF, an uninitialized
 * context, an out-of-range queue id or an unknown context type.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	/* An unknown ctype leaves max_id at 0, so this also rejects it */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	/* Selection is valid - remember it for the next read */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1487 
1488 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1489 					   const char __user *buffer,
1490 					   size_t count, loff_t *ppos,
1491 					   int ctype)
1492 {
1493 	struct seq_file *m = filp->private_data;
1494 	struct nix_hw *nix_hw = m->private;
1495 	struct rvu *rvu = nix_hw->rvu;
1496 	char *cmd_buf, *ctype_string;
1497 	int nixlf, id = 0, ret;
1498 	bool all = false;
1499 
1500 	if ((*ppos != 0) || !count)
1501 		return -EINVAL;
1502 
1503 	switch (ctype) {
1504 	case NIX_AQ_CTYPE_SQ:
1505 		ctype_string = "sq";
1506 		break;
1507 	case NIX_AQ_CTYPE_RQ:
1508 		ctype_string = "rq";
1509 		break;
1510 	case NIX_AQ_CTYPE_CQ:
1511 		ctype_string = "cq";
1512 		break;
1513 	default:
1514 		return -EINVAL;
1515 	}
1516 
1517 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1518 
1519 	if (!cmd_buf)
1520 		return count;
1521 
1522 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1523 				   &nixlf, &id, &all);
1524 	if (ret < 0) {
1525 		dev_info(rvu->dev,
1526 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1527 			 ctype_string, ctype_string);
1528 		goto done;
1529 	} else {
1530 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1531 					  ctype_string, m);
1532 	}
1533 done:
1534 	kfree(cmd_buf);
1535 	return ret ? ret : count;
1536 }
1537 
/* debugfs write: select which SQ context(s) the next read will dump */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}
1545 
/* debugfs read: dump the previously selected SQ context(s) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
1550 
1551 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1552 
/* debugfs write: select which RQ context(s) the next read will dump */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}
1560 
1561 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
1562 {
1563 	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
1564 }
1565 
1566 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1567 
/* debugfs write: select which CQ context(s) the next read will dump */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}
1575 
/* debugfs read: dump the previously selected CQ context(s) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
1580 
1581 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1582 
1583 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1584 				 unsigned long *bmap, char *qtype)
1585 {
1586 	char *buf;
1587 
1588 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1589 	if (!buf)
1590 		return;
1591 
1592 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1593 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1594 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1595 		   qtype, buf);
1596 	kfree(buf);
1597 }
1598 
1599 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1600 {
1601 	if (!pfvf->cq_ctx)
1602 		seq_puts(filp, "cq context is not initialized\n");
1603 	else
1604 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1605 				     "cq");
1606 
1607 	if (!pfvf->rq_ctx)
1608 		seq_puts(filp, "rq context is not initialized\n");
1609 	else
1610 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1611 				     "rq");
1612 
1613 	if (!pfvf->sq_ctx)
1614 		seq_puts(filp, "sq context is not initialized\n");
1615 	else
1616 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1617 				     "sq");
1618 }
1619 
/* debugfs write: select the NIX LF whose queue sizes will be shown */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
1627 
/* debugfs read: show queue sizes of the previously selected NIX LF */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
1632 
1633 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1634 
/* Pretty-print one NIX bandwidth (ingress policer) profile context */
static void print_band_prof_ctx(struct seq_file *m,
				struct nix_bandprof_s *prof)
{
	char *str;

	/* NOTE(review): no default case - this assumes pc_mode can only
	 * take the four enumerated values (i.e. it is a two-bit field),
	 * otherwise 'str' would be used uninitialized below; confirm
	 * against struct nix_bandprof_s.
	 */
	switch (prof->pc_mode) {
	case NIX_RX_PC_MODE_VLAN:
		str = "VLAN";
		break;
	case NIX_RX_PC_MODE_DSCP:
		str = "DSCP";
		break;
	case NIX_RX_PC_MODE_GEN:
		str = "Generic";
		break;
	case NIX_RX_PC_MODE_RSVD:
		str = "Reserved";
		break;
	}
	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
	str = (prof->icolor == 3) ? "Color blind" :
		(prof->icolor == 0) ? "Green" :
		(prof->icolor == 1) ? "Yellow" : "Red";
	seq_printf(m, "W0: icolor\t\t%s\n", str);
	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);

	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
	str = (prof->lmode == 0) ? "byte" : "packet";
	seq_printf(m, "W1: lmode\t\t%s\n", str);
	/* 'l_sellect' is the (misspelled) field name in the HW struct */
	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
	str = (prof->gc_action == 0) ? "PASS" :
		(prof->gc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: gc_action\t\t%s\n", str);
	str = (prof->yc_action == 0) ? "PASS" :
		(prof->yc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: yc_action\t\t%s\n", str);
	str = (prof->rc_action == 0) ? "PASS" :
		(prof->rc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: rc_action\t\t%s\n", str);
	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);

	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
		   (u64)prof->green_pkt_pass);
	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
		   (u64)prof->yellow_pkt_pass);
	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
	seq_printf(m, "W7: green_octs_pass\t%lld\n",
		   (u64)prof->green_octs_pass);
	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
		   (u64)prof->yellow_octs_pass);
	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
		   (u64)prof->green_pkt_drop);
	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
		   (u64)prof->yellow_pkt_drop);
	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
	seq_printf(m, "W13: green_octs_drop\t%lld\n",
		   (u64)prof->green_octs_drop);
	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
		   (u64)prof->yellow_octs_drop);
	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
	seq_puts(m, "==============================\n");
}
1713 
/* Walk all bandwidth-profile layers and dump every allocated profile's
 * context, along with the PF/VF it is allocated to.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only dump profiles that are actually allocated */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* HW profile index: layer in the two MSBs, entry
			 * number in the low 14 bits.
			 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* Zero FUNC bits => owned by the PF itself,
			 * otherwise the (1-based) VF number is encoded.
			 */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}
1764 
1765 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1766 
1767 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1768 {
1769 	struct nix_hw *nix_hw = m->private;
1770 	struct nix_ipolicer *ipolicer;
1771 	int layer;
1772 	char *str;
1773 
1774 	seq_puts(m, "\nBandwidth profile resource free count\n");
1775 	seq_puts(m, "=====================================\n");
1776 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1777 		if (layer == BAND_PROF_INVAL_LAYER)
1778 			continue;
1779 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1780 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1781 
1782 		ipolicer = &nix_hw->ipolicer[layer];
1783 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1784 			   ipolicer->band_prof.max,
1785 			   rvu_rsrc_free_count(&ipolicer->band_prof));
1786 	}
1787 	seq_puts(m, "=====================================\n");
1788 
1789 	return 0;
1790 }
1791 
1792 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1793 
1794 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1795 {
1796 	struct nix_hw *nix_hw;
1797 
1798 	if (!is_block_implemented(rvu->hw, blkaddr))
1799 		return;
1800 
1801 	if (blkaddr == BLKADDR_NIX0) {
1802 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1803 		nix_hw = &rvu->hw->nix[0];
1804 	} else {
1805 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1806 						      rvu->rvu_dbg.root);
1807 		nix_hw = &rvu->hw->nix[1];
1808 	}
1809 
1810 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1811 			    &rvu_dbg_nix_sq_ctx_fops);
1812 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1813 			    &rvu_dbg_nix_rq_ctx_fops);
1814 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1815 			    &rvu_dbg_nix_cq_ctx_fops);
1816 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1817 			    &rvu_dbg_nix_ndc_tx_cache_fops);
1818 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1819 			    &rvu_dbg_nix_ndc_rx_cache_fops);
1820 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1821 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1822 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1823 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1824 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1825 			    &rvu_dbg_nix_qsize_fops);
1826 	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1827 			    &rvu_dbg_nix_band_prof_ctx_fops);
1828 	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
1829 			    &rvu_dbg_nix_band_prof_rsrc_fops);
1830 }
1831 
1832 static void rvu_dbg_npa_init(struct rvu *rvu)
1833 {
1834 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1835 
1836 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1837 			    &rvu_dbg_npa_qsize_fops);
1838 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1839 			    &rvu_dbg_npa_aura_ctx_fops);
1840 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1841 			    &rvu_dbg_npa_pool_ctx_fops);
1842 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1843 			    &rvu_dbg_npa_ndc_cache_fops);
1844 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
1845 			    &rvu_dbg_npa_ndc_hits_miss_fops);
1846 }
1847 
/* Fetch the cumulative NIX RX stat @idx for the current cgxd/lmac_id and,
 * on success, print "name: value" to seq_file 's'.  Sets the surrounding
 * function's 'err'; the expression value (cnt) is only meaningful when
 * err == 0, so callers must check 'err' before using it.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1857 
/* TX-side counterpart of PRINT_CGX_CUML_NIXRX_STATUS: fetch cumulative NIX
 * TX stat @idx, print it on success, set 'err', and evaluate to the count.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1867 
1868 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1869 {
1870 	struct cgx_link_user_info linfo;
1871 	struct mac_ops *mac_ops;
1872 	void *cgxd = s->private;
1873 	u64 ucast, mcast, bcast;
1874 	int stat = 0, err = 0;
1875 	u64 tx_stat, rx_stat;
1876 	struct rvu *rvu;
1877 
1878 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1879 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1880 	if (!rvu)
1881 		return -ENODEV;
1882 
1883 	mac_ops = get_mac_ops(cgxd);
1884 
1885 	if (!mac_ops)
1886 		return 0;
1887 
1888 	/* Link status */
1889 	seq_puts(s, "\n=======Link Status======\n\n");
1890 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1891 	if (err)
1892 		seq_puts(s, "Failed to read link status\n");
1893 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
1894 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
1895 
1896 	/* Rx stats */
1897 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1898 		   mac_ops->name);
1899 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1900 	if (err)
1901 		return err;
1902 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1903 	if (err)
1904 		return err;
1905 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1906 	if (err)
1907 		return err;
1908 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1909 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1910 	if (err)
1911 		return err;
1912 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1913 	if (err)
1914 		return err;
1915 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1916 	if (err)
1917 		return err;
1918 
1919 	/* Tx stats */
1920 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
1921 		   mac_ops->name);
1922 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1923 	if (err)
1924 		return err;
1925 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1926 	if (err)
1927 		return err;
1928 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1929 	if (err)
1930 		return err;
1931 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1932 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1933 	if (err)
1934 		return err;
1935 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1936 	if (err)
1937 		return err;
1938 
1939 	/* Rx stats */
1940 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
1941 	while (stat < mac_ops->rx_stats_cnt) {
1942 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1943 		if (err)
1944 			return err;
1945 		if (is_rvu_otx2(rvu))
1946 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
1947 				   rx_stat);
1948 		else
1949 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
1950 				   rx_stat);
1951 		stat++;
1952 	}
1953 
1954 	/* Tx stats */
1955 	stat = 0;
1956 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
1957 	while (stat < mac_ops->tx_stats_cnt) {
1958 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1959 		if (err)
1960 			return err;
1961 
1962 	if (is_rvu_otx2(rvu))
1963 		seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
1964 			   tx_stat);
1965 	else
1966 		seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
1967 			   tx_stat);
1968 	stat++;
1969 	}
1970 
1971 	return err;
1972 }
1973 
1974 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1975 {
1976 	struct dentry *current_dir;
1977 	int err, lmac_id;
1978 	char *buf;
1979 
1980 	current_dir = filp->file->f_path.dentry->d_parent;
1981 	buf = strrchr(current_dir->d_name.name, 'c');
1982 	if (!buf)
1983 		return -EINVAL;
1984 
1985 	err = kstrtoint(buf + 1, 10, &lmac_id);
1986 	if (!err) {
1987 		err = cgx_print_stats(filp, lmac_id);
1988 		if (err)
1989 			return err;
1990 	}
1991 	return err;
1992 }
1993 
1994 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1995 
1996 static void rvu_dbg_cgx_init(struct rvu *rvu)
1997 {
1998 	struct mac_ops *mac_ops;
1999 	unsigned long lmac_bmap;
2000 	int i, lmac_id;
2001 	char dname[20];
2002 	void *cgx;
2003 
2004 	if (!cgx_get_cgxcnt_max())
2005 		return;
2006 
2007 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2008 	if (!mac_ops)
2009 		return;
2010 
2011 	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2012 						   rvu->rvu_dbg.root);
2013 
2014 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2015 		cgx = rvu_cgx_pdata(i, rvu);
2016 		if (!cgx)
2017 			continue;
2018 		lmac_bmap = cgx_get_lmac_bmap(cgx);
2019 		/* cgx debugfs dir */
2020 		sprintf(dname, "%s%d", mac_ops->name, i);
2021 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2022 						      rvu->rvu_dbg.cgx_root);
2023 
2024 		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2025 			/* lmac debugfs dir */
2026 			sprintf(dname, "lmac%d", lmac_id);
2027 			rvu->rvu_dbg.lmac =
2028 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2029 
2030 			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2031 					    cgx, &rvu_dbg_cgx_stat_fops);
2032 		}
2033 	}
2034 }
2035 
2036 /* NPC debugfs APIs */
2037 static void rvu_print_npc_mcam_info(struct seq_file *s,
2038 				    u16 pcifunc, int blkaddr)
2039 {
2040 	struct rvu *rvu = s->private;
2041 	int entry_acnt, entry_ecnt;
2042 	int cntr_acnt, cntr_ecnt;
2043 
2044 	/* Skip PF0 */
2045 	if (!pcifunc)
2046 		return;
2047 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2048 					  &entry_acnt, &entry_ecnt);
2049 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2050 					    &cntr_acnt, &cntr_ecnt);
2051 	if (!entry_acnt && !cntr_acnt)
2052 		return;
2053 
2054 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2055 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2056 			   rvu_get_pf(pcifunc));
2057 	else
2058 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2059 			   rvu_get_pf(pcifunc),
2060 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2061 
2062 	if (entry_acnt) {
2063 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2064 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2065 	}
2066 	if (cntr_acnt) {
2067 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2068 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2069 	}
2070 }
2071 
2072 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2073 {
2074 	struct rvu *rvu = filp->private;
2075 	int pf, vf, numvfs, blkaddr;
2076 	struct npc_mcam *mcam;
2077 	u16 pcifunc, counters;
2078 	u64 cfg;
2079 
2080 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2081 	if (blkaddr < 0)
2082 		return -ENODEV;
2083 
2084 	mcam = &rvu->hw->mcam;
2085 	counters = rvu->hw->npc_counters;
2086 
2087 	seq_puts(filp, "\nNPC MCAM info:\n");
2088 	/* MCAM keywidth on receive and transmit sides */
2089 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2090 	cfg = (cfg >> 32) & 0x07;
2091 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2092 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2093 		   "224bits" : "448bits"));
2094 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2095 	cfg = (cfg >> 32) & 0x07;
2096 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2097 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2098 		   "224bits" : "448bits"));
2099 
2100 	mutex_lock(&mcam->lock);
2101 	/* MCAM entries */
2102 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2103 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2104 		   mcam->total_entries - mcam->bmap_entries);
2105 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2106 
2107 	/* MCAM counters */
2108 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2109 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2110 		   counters - mcam->counters.max);
2111 	seq_printf(filp, "\t\t Available \t: %d\n",
2112 		   rvu_rsrc_free_count(&mcam->counters));
2113 
2114 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
2115 		mutex_unlock(&mcam->lock);
2116 		return 0;
2117 	}
2118 
2119 	seq_puts(filp, "\n\t\t Current allocation\n");
2120 	seq_puts(filp, "\t\t====================\n");
2121 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2122 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2123 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2124 
2125 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2126 		numvfs = (cfg >> 12) & 0xFF;
2127 		for (vf = 0; vf < numvfs; vf++) {
2128 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2129 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2130 		}
2131 	}
2132 
2133 	mutex_unlock(&mcam->lock);
2134 	return 0;
2135 }
2136 
2137 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2138 
2139 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2140 					     void *unused)
2141 {
2142 	struct rvu *rvu = filp->private;
2143 	struct npc_mcam *mcam;
2144 	int blkaddr;
2145 
2146 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2147 	if (blkaddr < 0)
2148 		return -ENODEV;
2149 
2150 	mcam = &rvu->hw->mcam;
2151 
2152 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2153 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2154 		   rvu_read64(rvu, blkaddr,
2155 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2156 
2157 	return 0;
2158 }
2159 
2160 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2161 
2162 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2163 					struct rvu_npc_mcam_rule *rule)
2164 {
2165 	u8 bit;
2166 
2167 	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2168 		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
2169 		switch (bit) {
2170 		case NPC_DMAC:
2171 			seq_printf(s, "%pM ", rule->packet.dmac);
2172 			seq_printf(s, "mask %pM\n", rule->mask.dmac);
2173 			break;
2174 		case NPC_SMAC:
2175 			seq_printf(s, "%pM ", rule->packet.smac);
2176 			seq_printf(s, "mask %pM\n", rule->mask.smac);
2177 			break;
2178 		case NPC_ETYPE:
2179 			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2180 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2181 			break;
2182 		case NPC_OUTER_VID:
2183 			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2184 			seq_printf(s, "mask 0x%x\n",
2185 				   ntohs(rule->mask.vlan_tci));
2186 			break;
2187 		case NPC_TOS:
2188 			seq_printf(s, "%d ", rule->packet.tos);
2189 			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
2190 			break;
2191 		case NPC_SIP_IPV4:
2192 			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2193 			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2194 			break;
2195 		case NPC_DIP_IPV4:
2196 			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2197 			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
2198 			break;
2199 		case NPC_SIP_IPV6:
2200 			seq_printf(s, "%pI6 ", rule->packet.ip6src);
2201 			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2202 			break;
2203 		case NPC_DIP_IPV6:
2204 			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2205 			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
2206 			break;
2207 		case NPC_SPORT_TCP:
2208 		case NPC_SPORT_UDP:
2209 		case NPC_SPORT_SCTP:
2210 			seq_printf(s, "%d ", ntohs(rule->packet.sport));
2211 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2212 			break;
2213 		case NPC_DPORT_TCP:
2214 		case NPC_DPORT_UDP:
2215 		case NPC_DPORT_SCTP:
2216 			seq_printf(s, "%d ", ntohs(rule->packet.dport));
2217 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
2218 			break;
2219 		default:
2220 			seq_puts(s, "\n");
2221 			break;
2222 		}
2223 	}
2224 }
2225 
2226 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2227 					 struct rvu_npc_mcam_rule *rule)
2228 {
2229 	if (rule->intf == NIX_INTF_TX) {
2230 		switch (rule->tx_action.op) {
2231 		case NIX_TX_ACTIONOP_DROP:
2232 			seq_puts(s, "\taction: Drop\n");
2233 			break;
2234 		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2235 			seq_puts(s, "\taction: Unicast to default channel\n");
2236 			break;
2237 		case NIX_TX_ACTIONOP_UCAST_CHAN:
2238 			seq_printf(s, "\taction: Unicast to channel %d\n",
2239 				   rule->tx_action.index);
2240 			break;
2241 		case NIX_TX_ACTIONOP_MCAST:
2242 			seq_puts(s, "\taction: Multicast\n");
2243 			break;
2244 		case NIX_TX_ACTIONOP_DROP_VIOL:
2245 			seq_puts(s, "\taction: Lockdown Violation Drop\n");
2246 			break;
2247 		default:
2248 			break;
2249 		}
2250 	} else {
2251 		switch (rule->rx_action.op) {
2252 		case NIX_RX_ACTIONOP_DROP:
2253 			seq_puts(s, "\taction: Drop\n");
2254 			break;
2255 		case NIX_RX_ACTIONOP_UCAST:
2256 			seq_printf(s, "\taction: Direct to queue %d\n",
2257 				   rule->rx_action.index);
2258 			break;
2259 		case NIX_RX_ACTIONOP_RSS:
2260 			seq_puts(s, "\taction: RSS\n");
2261 			break;
2262 		case NIX_RX_ACTIONOP_UCAST_IPSEC:
2263 			seq_puts(s, "\taction: Unicast ipsec\n");
2264 			break;
2265 		case NIX_RX_ACTIONOP_MCAST:
2266 			seq_puts(s, "\taction: Multicast\n");
2267 			break;
2268 		default:
2269 			break;
2270 		}
2271 	}
2272 }
2273 
2274 static const char *rvu_dbg_get_intf_name(int intf)
2275 {
2276 	switch (intf) {
2277 	case NIX_INTFX_RX(0):
2278 		return "NIX0_RX";
2279 	case NIX_INTFX_RX(1):
2280 		return "NIX1_RX";
2281 	case NIX_INTFX_TX(0):
2282 		return "NIX0_TX";
2283 	case NIX_INTFX_TX(1):
2284 		return "NIX1_TX";
2285 	default:
2286 		break;
2287 	}
2288 
2289 	return "unknown";
2290 }
2291 
/* Walk the software list of installed MCAM rules and dump, for each rule:
 * owner PF/VF, direction, interface, entry index, match fields, forwarding
 * target (RX only), action, enable state and - if attached - hit counter.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	/* mcam->lock protects the rules list against concurrent updates */
	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC field means the owner is a VF of this PF */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry the destination PF/VF in the action */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		/* Hit counter is optional per rule */
		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2357 
2358 static void rvu_dbg_npc_init(struct rvu *rvu)
2359 {
2360 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2361 
2362 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2363 			    &rvu_dbg_npc_mcam_info_fops);
2364 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2365 			    &rvu_dbg_npc_mcam_rules_fops);
2366 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2367 			    &rvu_dbg_npc_rx_miss_act_fops);
2368 }
2369 
2370 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2371 {
2372 	struct cpt_ctx *ctx = filp->private;
2373 	u64 busy_sts = 0, free_sts = 0;
2374 	u32 e_min = 0, e_max = 0, e, i;
2375 	u16 max_ses, max_ies, max_aes;
2376 	struct rvu *rvu = ctx->rvu;
2377 	int blkaddr = ctx->blkaddr;
2378 	u64 reg;
2379 
2380 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2381 	max_ses = reg & 0xffff;
2382 	max_ies = (reg >> 16) & 0xffff;
2383 	max_aes = (reg >> 32) & 0xffff;
2384 
2385 	switch (eng_type) {
2386 	case CPT_AE_TYPE:
2387 		e_min = max_ses + max_ies;
2388 		e_max = max_ses + max_ies + max_aes;
2389 		break;
2390 	case CPT_SE_TYPE:
2391 		e_min = 0;
2392 		e_max = max_ses;
2393 		break;
2394 	case CPT_IE_TYPE:
2395 		e_min = max_ses;
2396 		e_max = max_ses + max_ies;
2397 		break;
2398 	default:
2399 		return -EINVAL;
2400 	}
2401 
2402 	for (e = e_min, i = 0; e < e_max; e++, i++) {
2403 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2404 		if (reg & 0x1)
2405 			busy_sts |= 1ULL << i;
2406 
2407 		if (reg & 0x2)
2408 			free_sts |= 1ULL << i;
2409 	}
2410 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2411 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
2412 
2413 	return 0;
2414 }
2415 
/* debugfs: FREE/BUSY status of the CPT asymmetric (AE) engines. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2422 
/* debugfs: FREE/BUSY status of the CPT symmetric (SE) engines. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2429 
/* debugfs: FREE/BUSY status of the CPT IPsec (IE) engines. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2436 
2437 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2438 {
2439 	struct cpt_ctx *ctx = filp->private;
2440 	u16 max_ses, max_ies, max_aes;
2441 	struct rvu *rvu = ctx->rvu;
2442 	int blkaddr = ctx->blkaddr;
2443 	u32 e_max, e;
2444 	u64 reg;
2445 
2446 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2447 	max_ses = reg & 0xffff;
2448 	max_ies = (reg >> 16) & 0xffff;
2449 	max_aes = (reg >> 32) & 0xffff;
2450 
2451 	e_max = max_ses + max_ies + max_aes;
2452 
2453 	seq_puts(filp, "===========================================\n");
2454 	for (e = 0; e < e_max; e++) {
2455 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2456 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2457 			   reg & 0xff);
2458 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2459 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2460 			   reg);
2461 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2462 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2463 			   reg);
2464 		seq_puts(filp, "===========================================\n");
2465 	}
2466 	return 0;
2467 }
2468 
2469 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2470 
2471 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2472 {
2473 	struct cpt_ctx *ctx = filp->private;
2474 	int blkaddr = ctx->blkaddr;
2475 	struct rvu *rvu = ctx->rvu;
2476 	struct rvu_block *block;
2477 	struct rvu_hwinfo *hw;
2478 	u64 reg;
2479 	u32 lf;
2480 
2481 	hw = rvu->hw;
2482 	block = &hw->block[blkaddr];
2483 	if (!block->lf.bmap)
2484 		return -ENODEV;
2485 
2486 	seq_puts(filp, "===========================================\n");
2487 	for (lf = 0; lf < block->lf.max; lf++) {
2488 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2489 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
2490 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2491 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
2492 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2493 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
2494 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2495 				(lf << block->lfshift));
2496 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
2497 		seq_puts(filp, "===========================================\n");
2498 	}
2499 	return 0;
2500 }
2501 
2502 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2503 
2504 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2505 {
2506 	struct cpt_ctx *ctx = filp->private;
2507 	struct rvu *rvu = ctx->rvu;
2508 	int blkaddr = ctx->blkaddr;
2509 	u64 reg0, reg1;
2510 
2511 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2512 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2513 	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
2514 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2515 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2516 	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
2517 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2518 	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
2519 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2520 	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
2521 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2522 	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
2523 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2524 	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
2525 
2526 	return 0;
2527 }
2528 
2529 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2530 
2531 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2532 {
2533 	struct cpt_ctx *ctx = filp->private;
2534 	struct rvu *rvu = ctx->rvu;
2535 	int blkaddr = ctx->blkaddr;
2536 	u64 reg;
2537 
2538 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2539 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2540 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2541 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2542 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2543 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2544 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2545 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2546 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2547 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2548 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2549 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2550 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2551 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2552 
2553 	return 0;
2554 }
2555 
2556 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2557 
2558 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2559 {
2560 	struct cpt_ctx *ctx;
2561 
2562 	if (!is_block_implemented(rvu->hw, blkaddr))
2563 		return;
2564 
2565 	if (blkaddr == BLKADDR_CPT0) {
2566 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2567 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
2568 		ctx->blkaddr = BLKADDR_CPT0;
2569 		ctx->rvu = rvu;
2570 	} else {
2571 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2572 						      rvu->rvu_dbg.root);
2573 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
2574 		ctx->blkaddr = BLKADDR_CPT1;
2575 		ctx->rvu = rvu;
2576 	}
2577 
2578 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2579 			    &rvu_dbg_cpt_pc_fops);
2580 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2581 			    &rvu_dbg_cpt_ae_sts_fops);
2582 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2583 			    &rvu_dbg_cpt_se_sts_fops);
2584 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2585 			    &rvu_dbg_cpt_ie_sts_fops);
2586 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2587 			    &rvu_dbg_cpt_engines_info_fops);
2588 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2589 			    &rvu_dbg_cpt_lfs_info_fops);
2590 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2591 			    &rvu_dbg_cpt_err_info_fops);
2592 }
2593 
/* Root debugfs directory name: "octeontx2" on OcteonTx2, "cn10k" otherwise. */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
2601 
2602 void rvu_dbg_init(struct rvu *rvu)
2603 {
2604 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2605 
2606 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2607 			    &rvu_dbg_rsrc_status_fops);
2608 
2609 	if (!cgx_get_cgxcnt_max())
2610 		goto create;
2611 
2612 	if (is_rvu_otx2(rvu))
2613 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2614 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2615 	else
2616 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2617 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2618 
2619 create:
2620 	rvu_dbg_npa_init(rvu);
2621 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2622 
2623 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2624 	rvu_dbg_cgx_init(rvu);
2625 	rvu_dbg_npc_init(rvu);
2626 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2627 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2628 }
2629 
/* Tear down the whole debugfs hierarchy created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2634 
2635 #endif /* CONFIG_DEBUG_FS */
2636