1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #ifdef CONFIG_DEBUG_FS
12 
13 #include <linux/fs.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 
18 #include "rvu_struct.h"
19 #include "rvu_reg.h"
20 #include "rvu.h"
21 #include "cgx.h"
22 #include "npc.h"
23 
24 #define DEBUGFS_DIR_NAME "octeontx2"
25 
/* Generic indices into the CGX RX/TX statistics description tables
 * below; the meaning of a given index differs per direction — see
 * cgx_rx_stats_fields[] and cgx_tx_stats_fields[].
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47 
/* NIX TX stats: per-LF transmit statistic indices */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,	/* count of TX stats */
};

/* NIX RX stats: per-LF receive statistic indices */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,	/* count of RX stats */
};
74 
/* Human-readable descriptions of the CGX RX statistics, indexed by
 * the CGX_STATn enum above.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
90 
91 static char *cgx_tx_stats_fields[] = {
92 	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
93 	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
94 	[CGX_STAT2]	= "Multiple collisions before successful transmission",
95 	[CGX_STAT3]	= "Single collisions before successful transmission",
96 	[CGX_STAT4]	= "Total octets sent on the interface",
97 	[CGX_STAT5]	= "Total frames sent on the interface",
98 	[CGX_STAT6]	= "Packets sent with an octet count < 64",
99 	[CGX_STAT7]	= "Packets sent with an octet count == 64",
100 	[CGX_STAT8]	= "Packets sent with an octet count of 65–127",
101 	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
102 	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
103 	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
104 	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
105 	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
106 	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
107 	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
108 	[CGX_STAT16]	= "Transmit underflow and were truncated",
109 	[CGX_STAT17]	= "Control/PAUSE packets sent",
110 };
111 
/* Number of NDC banks, read from the low byte of the block's
 * NDC_AF_CONST register.
 */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Placeholders so the FOPS macros below can be instantiated with a
 * NULL read/write op: RVU_DEBUG_*_FOPS(name, op, NULL) expands to
 * rvu_dbg_NULL / rvu_dbg_open_NULL, i.e. a NULL method pointer.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declares a seq_file based file_operations (rvu_dbg_<name>_fops)
 * whose show callback is rvu_dbg_<read_op> and whose write callback
 * is rvu_dbg_<write_op>; the inode's i_private is passed through as
 * the seq_file private pointer.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declares a raw (non seq_file) file_operations using simple_open;
 * the file's private_data is the inode's i_private (struct rvu *).
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
139 
140 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
141 
142 /* Dumps current provisioning status of all RVU block LFs */
143 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
144 					  char __user *buffer,
145 					  size_t count, loff_t *ppos)
146 {
147 	int index, off = 0, flag = 0, go_back = 0, off_prev;
148 	struct rvu *rvu = filp->private_data;
149 	int lf, pf, vf, pcifunc;
150 	struct rvu_block block;
151 	int bytes_not_copied;
152 	int buf_size = 2048;
153 	char *buf;
154 
155 	/* don't allow partial reads */
156 	if (*ppos != 0)
157 		return 0;
158 
159 	buf = kzalloc(buf_size, GFP_KERNEL);
160 	if (!buf)
161 		return -ENOSPC;
162 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
163 	for (index = 0; index < BLK_COUNT; index++)
164 		if (strlen(rvu->hw->block[index].name))
165 			off +=	scnprintf(&buf[off], buf_size - 1 - off,
166 					  "%*s\t", (index - 1) * 2,
167 					  rvu->hw->block[index].name);
168 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
169 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
170 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
171 			pcifunc = pf << 10 | vf;
172 			if (!pcifunc)
173 				continue;
174 
175 			if (vf) {
176 				go_back = scnprintf(&buf[off],
177 						    buf_size - 1 - off,
178 						    "PF%d:VF%d\t\t", pf,
179 						    vf - 1);
180 			} else {
181 				go_back = scnprintf(&buf[off],
182 						    buf_size - 1 - off,
183 						    "PF%d\t\t", pf);
184 			}
185 
186 			off += go_back;
187 			for (index = 0; index < BLKTYPE_MAX; index++) {
188 				block = rvu->hw->block[index];
189 				if (!strlen(block.name))
190 					continue;
191 				off_prev = off;
192 				for (lf = 0; lf < block.lf.max; lf++) {
193 					if (block.fn_map[lf] != pcifunc)
194 						continue;
195 					flag = 1;
196 					off += scnprintf(&buf[off], buf_size - 1
197 							- off, "%3d,", lf);
198 				}
199 				if (flag && off_prev != off)
200 					off--;
201 				else
202 					go_back++;
203 				off += scnprintf(&buf[off], buf_size - 1 - off,
204 						"\t");
205 			}
206 			if (!flag)
207 				off -= go_back;
208 			else
209 				flag = 0;
210 			off--;
211 			off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
212 		}
213 	}
214 
215 	bytes_not_copied = copy_to_user(buffer, buf, off);
216 	kfree(buf);
217 
218 	if (bytes_not_copied)
219 		return -EFAULT;
220 
221 	*ppos = off;
222 	return off;
223 }
224 
225 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
226 
227 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
228 				u16 *pcifunc)
229 {
230 	struct rvu_block *block;
231 	struct rvu_hwinfo *hw;
232 	int blkaddr;
233 
234 	blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
235 	if (blkaddr < 0) {
236 		dev_warn(rvu->dev, "Invalid blktype\n");
237 		return false;
238 	}
239 
240 	hw = rvu->hw;
241 	block = &hw->block[blkaddr];
242 
243 	if (lf < 0 || lf >= block->lf.max) {
244 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
245 			 block->lf.max - 1);
246 		return false;
247 	}
248 
249 	*pcifunc = block->fn_map[lf];
250 	if (!*pcifunc) {
251 		dev_warn(rvu->dev,
252 			 "This LF is not attached to any RVU PFFUNC\n");
253 		return false;
254 	}
255 	return true;
256 }
257 
258 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
259 {
260 	char *buf;
261 
262 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
263 	if (!buf)
264 		return;
265 
266 	if (!pfvf->aura_ctx) {
267 		seq_puts(m, "Aura context is not initialized\n");
268 	} else {
269 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
270 					pfvf->aura_ctx->qsize);
271 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
272 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
273 	}
274 
275 	if (!pfvf->pool_ctx) {
276 		seq_puts(m, "Pool context is not initialized\n");
277 	} else {
278 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
279 					pfvf->pool_ctx->qsize);
280 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
281 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
282 	}
283 	kfree(buf);
284 }
285 
286 /* The 'qsize' entry dumps current Aura/Pool context Qsize
287  * and each context's current enable/disable status in a bitmap.
288  */
289 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
290 				 int blktype)
291 {
292 	void (*print_qsize)(struct seq_file *filp,
293 			    struct rvu_pfvf *pfvf) = NULL;
294 	struct rvu_pfvf *pfvf;
295 	struct rvu *rvu;
296 	int qsize_id;
297 	u16 pcifunc;
298 
299 	rvu = filp->private;
300 	switch (blktype) {
301 	case BLKTYPE_NPA:
302 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
303 		print_qsize = print_npa_qsize;
304 		break;
305 
306 	case BLKTYPE_NIX:
307 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
308 		print_qsize = print_nix_qsize;
309 		break;
310 
311 	default:
312 		return -EINVAL;
313 	}
314 
315 	if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
316 		return -EINVAL;
317 
318 	pfvf = rvu_get_pfvf(rvu, pcifunc);
319 	print_qsize(filp, pfvf);
320 
321 	return 0;
322 }
323 
324 static ssize_t rvu_dbg_qsize_write(struct file *filp,
325 				   const char __user *buffer, size_t count,
326 				   loff_t *ppos, int blktype)
327 {
328 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
329 	struct seq_file *seqfile = filp->private_data;
330 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
331 	struct rvu *rvu = seqfile->private;
332 	u16 pcifunc;
333 	int ret, lf;
334 
335 	cmd_buf = memdup_user(buffer, count);
336 	if (IS_ERR(cmd_buf))
337 		return -ENOMEM;
338 
339 	cmd_buf[count] = '\0';
340 
341 	cmd_buf_tmp = strchr(cmd_buf, '\n');
342 	if (cmd_buf_tmp) {
343 		*cmd_buf_tmp = '\0';
344 		count = cmd_buf_tmp - cmd_buf + 1;
345 	}
346 
347 	cmd_buf_tmp = cmd_buf;
348 	subtoken = strsep(&cmd_buf, " ");
349 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
350 	if (cmd_buf)
351 		ret = -EINVAL;
352 
353 	if (!strncmp(subtoken, "help", 4) || ret < 0) {
354 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
355 		goto qsize_write_done;
356 	}
357 
358 	if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
359 		ret = -EINVAL;
360 		goto qsize_write_done;
361 	}
362 	if (blktype  == BLKTYPE_NPA)
363 		rvu->rvu_dbg.npa_qsize_id = lf;
364 	else
365 		rvu->rvu_dbg.nix_qsize_id = lf;
366 
367 qsize_write_done:
368 	kfree(cmd_buf_tmp);
369 	return ret ? ret : count;
370 }
371 
/* debugfs write handler for the NPA 'qsize' entry */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
					    BLKTYPE_NPA);
}

/* debugfs show handler for the NPA 'qsize' entry */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
386 
/* Dumps given NPA Aura's context: one line per field, grouped by the
 * context word (W0..W6) the field belongs to.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
}
428 
/* Dumps given NPA Pool's context: one line per field, grouped by the
 * context word (W0..W8) the field belongs to.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
}
470 
/* Reads aura/pool's ctx from admin queue and dumps it.  Which LF and
 * which context id(s) to dump were stored by the matching ctx_write
 * handler in rvu->rvu_dbg; 'all' selects every context 0..qsize-1.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Recover the selection written via aura_ctx/pool_ctx */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an AQ read instruction for the selected context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* 'all' dumps every context; otherwise just the requested id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
551 
552 static int write_npa_ctx(struct rvu *rvu, bool all,
553 			 int npalf, int id, int ctype)
554 {
555 	struct rvu_pfvf *pfvf;
556 	int max_id = 0;
557 	u16 pcifunc;
558 
559 	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
560 		return -EINVAL;
561 
562 	pfvf = rvu_get_pfvf(rvu, pcifunc);
563 
564 	if (ctype == NPA_AQ_CTYPE_AURA) {
565 		if (!pfvf->aura_ctx) {
566 			dev_warn(rvu->dev, "Aura context is not initialized\n");
567 			return -EINVAL;
568 		}
569 		max_id = pfvf->aura_ctx->qsize;
570 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
571 		if (!pfvf->pool_ctx) {
572 			dev_warn(rvu->dev, "Pool context is not initialized\n");
573 			return -EINVAL;
574 		}
575 		max_id = pfvf->pool_ctx->qsize;
576 	}
577 
578 	if (id < 0 || id >= max_id) {
579 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
580 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
581 			max_id - 1);
582 		return -EINVAL;
583 	}
584 
585 	switch (ctype) {
586 	case NPA_AQ_CTYPE_AURA:
587 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
588 		rvu->rvu_dbg.npa_aura_ctx.id = id;
589 		rvu->rvu_dbg.npa_aura_ctx.all = all;
590 		break;
591 
592 	case NPA_AQ_CTYPE_POOL:
593 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
594 		rvu->rvu_dbg.npa_pool_ctx.id = id;
595 		rvu->rvu_dbg.npa_pool_ctx.all = all;
596 		break;
597 	default:
598 		return -EINVAL;
599 	}
600 	return 0;
601 }
602 
603 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
604 				const char __user *buffer, int *npalf,
605 				int *id, bool *all)
606 {
607 	int bytes_not_copied;
608 	char *cmd_buf_tmp;
609 	char *subtoken;
610 	int ret;
611 
612 	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
613 	if (bytes_not_copied)
614 		return -EFAULT;
615 
616 	cmd_buf[*count] = '\0';
617 	cmd_buf_tmp = strchr(cmd_buf, '\n');
618 
619 	if (cmd_buf_tmp) {
620 		*cmd_buf_tmp = '\0';
621 		*count = cmd_buf_tmp - cmd_buf + 1;
622 	}
623 
624 	subtoken = strsep(&cmd_buf, " ");
625 	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
626 	if (ret < 0)
627 		return ret;
628 	subtoken = strsep(&cmd_buf, " ");
629 	if (subtoken && strcmp(subtoken, "all") == 0) {
630 		*all = true;
631 	} else {
632 		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
633 		if (ret < 0)
634 			return ret;
635 	}
636 	if (cmd_buf)
637 		return -EINVAL;
638 	return ret;
639 }
640 
641 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
642 				     const char __user *buffer,
643 				     size_t count, loff_t *ppos, int ctype)
644 {
645 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
646 					"aura" : "pool";
647 	struct seq_file *seqfp = filp->private_data;
648 	struct rvu *rvu = seqfp->private;
649 	int npalf, id = 0, ret;
650 	bool all = false;
651 
652 	if ((*ppos != 0) || !count)
653 		return -EINVAL;
654 
655 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
656 	if (!cmd_buf)
657 		return count;
658 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
659 				   &npalf, &id, &all);
660 	if (ret < 0) {
661 		dev_info(rvu->dev,
662 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
663 			 ctype_string, ctype_string);
664 		goto done;
665 	} else {
666 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
667 	}
668 done:
669 	kfree(cmd_buf);
670 	return ret ? ret : count;
671 }
672 
/* debugfs write handler for the NPA 'aura_ctx' entry */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}

/* debugfs show handler for the NPA 'aura_ctx' entry */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);

/* debugfs write handler for the NPA 'pool_ctx' entry */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}

/* debugfs show handler for the NPA 'pool_ctx' entry */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
702 
703 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
704 			    int ctype, int transaction)
705 {
706 	u64 req, out_req, lat, cant_alloc;
707 	struct rvu *rvu = s->private;
708 	int port;
709 
710 	for (port = 0; port < NDC_MAX_PORT; port++) {
711 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
712 						(port, ctype, transaction));
713 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
714 						(port, ctype, transaction));
715 		out_req = rvu_read64(rvu, blk_addr,
716 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
717 				     (port, ctype, transaction));
718 		cant_alloc = rvu_read64(rvu, blk_addr,
719 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
720 					(port, transaction));
721 		seq_printf(s, "\nPort:%d\n", port);
722 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
723 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
724 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
725 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
726 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
727 	}
728 }
729 
730 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
731 {
732 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
733 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
734 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
735 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
736 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
737 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
738 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
739 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
740 	return 0;
741 }
742 
/* debugfs show handler: NPA NDC cache statistics */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
749 
750 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
751 {
752 	struct rvu *rvu = s->private;
753 	int bank, max_bank;
754 
755 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
756 	for (bank = 0; bank < max_bank; bank++) {
757 		seq_printf(s, "BANK:%d\n", bank);
758 		seq_printf(s, "\tHits:\t%lld\n",
759 			   (u64)rvu_read64(rvu, blk_addr,
760 			   NDC_AF_BANKX_HIT_PC(bank)));
761 		seq_printf(s, "\tMiss:\t%lld\n",
762 			   (u64)rvu_read64(rvu, blk_addr,
763 			    NDC_AF_BANKX_MISS_PC(bank)));
764 	}
765 	return 0;
766 }
767 
/* debugfs show handler: NIX RX NDC cache statistics */
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NIX0_RX,
				   BLKADDR_NDC_NIX0_RX);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);

/* debugfs show handler: NIX TX NDC cache statistics */
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NIX0_TX,
				   BLKADDR_NDC_NIX0_TX);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);

/* debugfs show handler: NPA NDC bank hit/miss counters */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
791 
792 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
793 						void *unused)
794 {
795 	return ndc_blk_hits_miss_stats(filp,
796 				      NPA0_U, BLKADDR_NDC_NIX0_RX);
797 }
798 
799 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
800 
801 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
802 						void *unused)
803 {
804 	return ndc_blk_hits_miss_stats(filp,
805 				      NPA0_U, BLKADDR_NDC_NIX0_TX);
806 }
807 
808 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
809 
/* Dumps given nix_sq's context: one line per field, grouped by the
 * context word (W0..W15) the field belongs to.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;

	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
873 
/* Dumps given nix_rq's context: one line per field, grouped by the
 * context word (W0..W10) the field belongs to.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
935 
936 /* Dumps given nix_cq's context */
937 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
938 {
939 	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
940 
941 	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
942 
943 	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
944 	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
945 		   cq_ctx->avg_con, cq_ctx->cint_idx);
946 	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
947 		   cq_ctx->cq_err, cq_ctx->qint_idx);
948 	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
949 		   cq_ctx->bpid, cq_ctx->bp_ena);
950 
951 	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
952 		   cq_ctx->update_time, cq_ctx->avg_level);
953 	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
954 		   cq_ctx->head, cq_ctx->tail);
955 
956 	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
957 		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
958 	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
959 		   cq_ctx->qsize, cq_ctx->caching);
960 	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
961 		   cq_ctx->substream, cq_ctx->ena);
962 	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
963 		   cq_ctx->drop_ena, cq_ctx->drop);
964 	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
965 }
966 
/* Dump the NIX SQ/RQ/CQ context(s) previously selected through the
 * matching debugfs write handler (write_nix_queue_ctx) into the seq_file.
 * Reads each context from hardware via a NIX AQ READ mailbox request.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct rvu *rvu = filp->private;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the LF/queue selection stored by the last debugfs write */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-ctype queue count, label and context printer */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps every queue of the LF, otherwise just the chosen one */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1054 
1055 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1056 			       int id, int ctype, char *ctype_string)
1057 {
1058 	struct rvu_pfvf *pfvf;
1059 	int max_id = 0;
1060 	u16 pcifunc;
1061 
1062 	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
1063 		return -EINVAL;
1064 
1065 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1066 
1067 	if (ctype == NIX_AQ_CTYPE_SQ) {
1068 		if (!pfvf->sq_ctx) {
1069 			dev_warn(rvu->dev, "SQ context is not initialized\n");
1070 			return -EINVAL;
1071 		}
1072 		max_id = pfvf->sq_ctx->qsize;
1073 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1074 		if (!pfvf->rq_ctx) {
1075 			dev_warn(rvu->dev, "RQ context is not initialized\n");
1076 			return -EINVAL;
1077 		}
1078 		max_id = pfvf->rq_ctx->qsize;
1079 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1080 		if (!pfvf->cq_ctx) {
1081 			dev_warn(rvu->dev, "CQ context is not initialized\n");
1082 			return -EINVAL;
1083 		}
1084 		max_id = pfvf->cq_ctx->qsize;
1085 	}
1086 
1087 	if (id < 0 || id >= max_id) {
1088 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1089 			 ctype_string, max_id - 1);
1090 		return -EINVAL;
1091 	}
1092 	switch (ctype) {
1093 	case NIX_AQ_CTYPE_CQ:
1094 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1095 		rvu->rvu_dbg.nix_cq_ctx.id = id;
1096 		rvu->rvu_dbg.nix_cq_ctx.all = all;
1097 		break;
1098 
1099 	case NIX_AQ_CTYPE_SQ:
1100 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1101 		rvu->rvu_dbg.nix_sq_ctx.id = id;
1102 		rvu->rvu_dbg.nix_sq_ctx.all = all;
1103 		break;
1104 
1105 	case NIX_AQ_CTYPE_RQ:
1106 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1107 		rvu->rvu_dbg.nix_rq_ctx.id = id;
1108 		rvu->rvu_dbg.nix_rq_ctx.all = all;
1109 		break;
1110 	default:
1111 		return -EINVAL;
1112 	}
1113 	return 0;
1114 }
1115 
1116 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1117 					   const char __user *buffer,
1118 					   size_t count, loff_t *ppos,
1119 					   int ctype)
1120 {
1121 	struct seq_file *m = filp->private_data;
1122 	struct rvu *rvu = m->private;
1123 	char *cmd_buf, *ctype_string;
1124 	int nixlf, id = 0, ret;
1125 	bool all = false;
1126 
1127 	if ((*ppos != 0) || !count)
1128 		return -EINVAL;
1129 
1130 	switch (ctype) {
1131 	case NIX_AQ_CTYPE_SQ:
1132 		ctype_string = "sq";
1133 		break;
1134 	case NIX_AQ_CTYPE_RQ:
1135 		ctype_string = "rq";
1136 		break;
1137 	case NIX_AQ_CTYPE_CQ:
1138 		ctype_string = "cq";
1139 		break;
1140 	default:
1141 		return -EINVAL;
1142 	}
1143 
1144 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1145 
1146 	if (!cmd_buf)
1147 		return count;
1148 
1149 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1150 				   &nixlf, &id, &all);
1151 	if (ret < 0) {
1152 		dev_info(rvu->dev,
1153 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1154 			 ctype_string, ctype_string);
1155 		goto done;
1156 	} else {
1157 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1158 					  ctype_string);
1159 	}
1160 done:
1161 	kfree(cmd_buf);
1162 	return ret ? ret : count;
1163 }
1164 
/* debugfs write handler for nix/sq_ctx: selects which SQ context to dump */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}
1172 
/* debugfs read handler for nix/sq_ctx: dumps the selected SQ context(s) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
1177 
1178 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1179 
/* debugfs write handler for nix/rq_ctx: selects which RQ context to dump */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}
1187 
/* debugfs read handler for nix/rq_ctx: dumps the selected RQ context(s) */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}
1192 
1193 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1194 
/* debugfs write handler for nix/cq_ctx: selects which CQ context to dump */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}
1202 
/* debugfs read handler for nix/cq_ctx: dumps the selected CQ context(s) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
1207 
1208 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1209 
1210 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1211 				 unsigned long *bmap, char *qtype)
1212 {
1213 	char *buf;
1214 
1215 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1216 	if (!buf)
1217 		return;
1218 
1219 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1220 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1221 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1222 		   qtype, buf);
1223 	kfree(buf);
1224 }
1225 
1226 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1227 {
1228 	if (!pfvf->cq_ctx)
1229 		seq_puts(filp, "cq context is not initialized\n");
1230 	else
1231 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1232 				     "cq");
1233 
1234 	if (!pfvf->rq_ctx)
1235 		seq_puts(filp, "rq context is not initialized\n");
1236 	else
1237 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1238 				     "rq");
1239 
1240 	if (!pfvf->sq_ctx)
1241 		seq_puts(filp, "sq context is not initialized\n");
1242 	else
1243 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1244 				     "sq");
1245 }
1246 
/* debugfs write handler for nix/qsize: selects the NIX LF to report on */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
1254 
/* debugfs read handler for nix/qsize: dumps queue sizes of the chosen LF */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
1259 
1260 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1261 
1262 static void rvu_dbg_nix_init(struct rvu *rvu)
1263 {
1264 	const struct device *dev = &rvu->pdev->dev;
1265 	struct dentry *pfile;
1266 
1267 	rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1268 	if (!rvu->rvu_dbg.nix) {
1269 		dev_err(rvu->dev, "create debugfs dir failed for nix\n");
1270 		return;
1271 	}
1272 
1273 	pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1274 				    &rvu_dbg_nix_sq_ctx_fops);
1275 	if (!pfile)
1276 		goto create_failed;
1277 
1278 	pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1279 				    &rvu_dbg_nix_rq_ctx_fops);
1280 	if (!pfile)
1281 		goto create_failed;
1282 
1283 	pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1284 				    &rvu_dbg_nix_cq_ctx_fops);
1285 	if (!pfile)
1286 		goto create_failed;
1287 
1288 	pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
1289 				    &rvu_dbg_nix_ndc_tx_cache_fops);
1290 	if (!pfile)
1291 		goto create_failed;
1292 
1293 	pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
1294 				    &rvu_dbg_nix_ndc_rx_cache_fops);
1295 	if (!pfile)
1296 		goto create_failed;
1297 
1298 	pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
1299 				    rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1300 	if (!pfile)
1301 		goto create_failed;
1302 
1303 	pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
1304 				    rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1305 	if (!pfile)
1306 		goto create_failed;
1307 
1308 	pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1309 				    &rvu_dbg_nix_qsize_fops);
1310 	if (!pfile)
1311 		goto create_failed;
1312 
1313 	return;
1314 create_failed:
1315 	dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
1316 	debugfs_remove_recursive(rvu->rvu_dbg.nix);
1317 }
1318 
1319 static void rvu_dbg_npa_init(struct rvu *rvu)
1320 {
1321 	const struct device *dev = &rvu->pdev->dev;
1322 	struct dentry *pfile;
1323 
1324 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1325 	if (!rvu->rvu_dbg.npa)
1326 		return;
1327 
1328 	pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1329 				    &rvu_dbg_npa_qsize_fops);
1330 	if (!pfile)
1331 		goto create_failed;
1332 
1333 	pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1334 				    &rvu_dbg_npa_aura_ctx_fops);
1335 	if (!pfile)
1336 		goto create_failed;
1337 
1338 	pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1339 				    &rvu_dbg_npa_pool_ctx_fops);
1340 	if (!pfile)
1341 		goto create_failed;
1342 
1343 	pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1344 				    &rvu_dbg_npa_ndc_cache_fops);
1345 	if (!pfile)
1346 		goto create_failed;
1347 
1348 	pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
1349 				    rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
1350 	if (!pfile)
1351 		goto create_failed;
1352 
1353 	return;
1354 
1355 create_failed:
1356 	dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
1357 	debugfs_remove_recursive(rvu->rvu_dbg.npa);
1358 }
1359 
/* Read cumulative NIX RX stat @idx for the current CGX LMAC and, on
 * success, print it to the seq_file; the statement expression evaluates
 * to the counter value. Uses 'rvu', 'cgxd', 'lmac_id', 'err' and 's'
 * from the calling scope ('err' is written as a side effect).
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1369 
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: reads cumulative NIX
 * TX stat @idx, prints it on success and evaluates to the counter
 * value; relies on the same names from the calling scope.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1379 
1380 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1381 {
1382 	struct cgx_link_user_info linfo;
1383 	void *cgxd = s->private;
1384 	u64 ucast, mcast, bcast;
1385 	int stat = 0, err = 0;
1386 	u64 tx_stat, rx_stat;
1387 	struct rvu *rvu;
1388 
1389 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1390 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1391 	if (!rvu)
1392 		return -ENODEV;
1393 
1394 	/* Link status */
1395 	seq_puts(s, "\n=======Link Status======\n\n");
1396 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1397 	if (err)
1398 		seq_puts(s, "Failed to read link status\n");
1399 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
1400 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
1401 
1402 	/* Rx stats */
1403 	seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
1404 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1405 	if (err)
1406 		return err;
1407 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1408 	if (err)
1409 		return err;
1410 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1411 	if (err)
1412 		return err;
1413 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1414 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1415 	if (err)
1416 		return err;
1417 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1418 	if (err)
1419 		return err;
1420 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1421 	if (err)
1422 		return err;
1423 
1424 	/* Tx stats */
1425 	seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
1426 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1427 	if (err)
1428 		return err;
1429 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1430 	if (err)
1431 		return err;
1432 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1433 	if (err)
1434 		return err;
1435 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1436 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1437 	if (err)
1438 		return err;
1439 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1440 	if (err)
1441 		return err;
1442 
1443 	/* Rx stats */
1444 	seq_puts(s, "\n=======CGX RX_STATS======\n\n");
1445 	while (stat < CGX_RX_STATS_COUNT) {
1446 		err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1447 		if (err)
1448 			return err;
1449 		seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
1450 		stat++;
1451 	}
1452 
1453 	/* Tx stats */
1454 	stat = 0;
1455 	seq_puts(s, "\n=======CGX TX_STATS======\n\n");
1456 	while (stat < CGX_TX_STATS_COUNT) {
1457 		err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1458 		if (err)
1459 			return err;
1460 		seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
1461 		stat++;
1462 	}
1463 
1464 	return err;
1465 }
1466 
1467 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1468 {
1469 	struct dentry *current_dir;
1470 	int err, lmac_id;
1471 	char *buf;
1472 
1473 	current_dir = filp->file->f_path.dentry->d_parent;
1474 	buf = strrchr(current_dir->d_name.name, 'c');
1475 	if (!buf)
1476 		return -EINVAL;
1477 
1478 	err = kstrtoint(buf + 1, 10, &lmac_id);
1479 	if (!err) {
1480 		err = cgx_print_stats(filp, lmac_id);
1481 		if (err)
1482 			return err;
1483 	}
1484 	return err;
1485 }
1486 
1487 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1488 
1489 static void rvu_dbg_cgx_init(struct rvu *rvu)
1490 {
1491 	const struct device *dev = &rvu->pdev->dev;
1492 	struct dentry *pfile;
1493 	int i, lmac_id;
1494 	char dname[20];
1495 	void *cgx;
1496 
1497 	rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
1498 
1499 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
1500 		cgx = rvu_cgx_pdata(i, rvu);
1501 		if (!cgx)
1502 			continue;
1503 		/* cgx debugfs dir */
1504 		sprintf(dname, "cgx%d", i);
1505 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
1506 						      rvu->rvu_dbg.cgx_root);
1507 		for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
1508 			/* lmac debugfs dir */
1509 			sprintf(dname, "lmac%d", lmac_id);
1510 			rvu->rvu_dbg.lmac =
1511 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
1512 
1513 			pfile =	debugfs_create_file("stats", 0600,
1514 						    rvu->rvu_dbg.lmac, cgx,
1515 						    &rvu_dbg_cgx_stat_fops);
1516 			if (!pfile)
1517 				goto create_failed;
1518 		}
1519 	}
1520 	return;
1521 
1522 create_failed:
1523 	dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
1524 	debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
1525 }
1526 
1527 /* NPC debugfs APIs */
1528 static void rvu_print_npc_mcam_info(struct seq_file *s,
1529 				    u16 pcifunc, int blkaddr)
1530 {
1531 	struct rvu *rvu = s->private;
1532 	int entry_acnt, entry_ecnt;
1533 	int cntr_acnt, cntr_ecnt;
1534 
1535 	/* Skip PF0 */
1536 	if (!pcifunc)
1537 		return;
1538 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
1539 					  &entry_acnt, &entry_ecnt);
1540 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
1541 					    &cntr_acnt, &cntr_ecnt);
1542 	if (!entry_acnt && !cntr_acnt)
1543 		return;
1544 
1545 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1546 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
1547 			   rvu_get_pf(pcifunc));
1548 	else
1549 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
1550 			   rvu_get_pf(pcifunc),
1551 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1552 
1553 	if (entry_acnt) {
1554 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
1555 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
1556 	}
1557 	if (cntr_acnt) {
1558 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
1559 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
1560 	}
1561 }
1562 
1563 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
1564 {
1565 	struct rvu *rvu = filp->private;
1566 	int pf, vf, numvfs, blkaddr;
1567 	struct npc_mcam *mcam;
1568 	u16 pcifunc;
1569 	u64 cfg;
1570 
1571 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1572 	if (blkaddr < 0)
1573 		return -ENODEV;
1574 
1575 	mcam = &rvu->hw->mcam;
1576 
1577 	seq_puts(filp, "\nNPC MCAM info:\n");
1578 	/* MCAM keywidth on receive and transmit sides */
1579 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
1580 	cfg = (cfg >> 32) & 0x07;
1581 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
1582 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
1583 		   "224bits" : "448bits"));
1584 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
1585 	cfg = (cfg >> 32) & 0x07;
1586 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
1587 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
1588 		   "224bits" : "448bits"));
1589 
1590 	mutex_lock(&mcam->lock);
1591 	/* MCAM entries */
1592 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
1593 	seq_printf(filp, "\t\t Reserved \t: %d\n",
1594 		   mcam->total_entries - mcam->bmap_entries);
1595 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
1596 
1597 	/* MCAM counters */
1598 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
1599 	cfg = (cfg >> 48) & 0xFFFF;
1600 	seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
1601 	seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
1602 	seq_printf(filp, "\t\t Available \t: %d\n",
1603 		   rvu_rsrc_free_count(&mcam->counters));
1604 
1605 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
1606 		mutex_unlock(&mcam->lock);
1607 		return 0;
1608 	}
1609 
1610 	seq_puts(filp, "\n\t\t Current allocation\n");
1611 	seq_puts(filp, "\t\t====================\n");
1612 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
1613 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1614 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
1615 
1616 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1617 		numvfs = (cfg >> 12) & 0xFF;
1618 		for (vf = 0; vf < numvfs; vf++) {
1619 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
1620 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
1621 		}
1622 	}
1623 
1624 	mutex_unlock(&mcam->lock);
1625 	return 0;
1626 }
1627 
1628 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
1629 
1630 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
1631 					     void *unused)
1632 {
1633 	struct rvu *rvu = filp->private;
1634 	struct npc_mcam *mcam;
1635 	int blkaddr;
1636 
1637 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1638 	if (blkaddr < 0)
1639 		return -ENODEV;
1640 
1641 	mcam = &rvu->hw->mcam;
1642 
1643 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
1644 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
1645 		   rvu_read64(rvu, blkaddr,
1646 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
1647 
1648 	return 0;
1649 }
1650 
1651 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
1652 
1653 static void rvu_dbg_npc_init(struct rvu *rvu)
1654 {
1655 	const struct device *dev = &rvu->pdev->dev;
1656 	struct dentry *pfile;
1657 
1658 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
1659 	if (!rvu->rvu_dbg.npc)
1660 		return;
1661 
1662 	pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
1663 				    rvu, &rvu_dbg_npc_mcam_info_fops);
1664 	if (!pfile)
1665 		goto create_failed;
1666 
1667 	pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
1668 				    rvu, &rvu_dbg_npc_rx_miss_act_fops);
1669 	if (!pfile)
1670 		goto create_failed;
1671 
1672 	return;
1673 
1674 create_failed:
1675 	dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
1676 	debugfs_remove_recursive(rvu->rvu_dbg.npc);
1677 }
1678 
/* Entry point for AF debugfs support: creates the "octeontx2" root
 * directory, the top-level rsrc_alloc file, and the per-block
 * (NPA/NIX/CGX/NPC) sub-hierarchies. Failures are logged but not
 * propagated - debugfs is best-effort.
 */
void rvu_dbg_init(struct rvu *rvu)
{
	struct device *dev = &rvu->pdev->dev;
	struct dentry *pfile;

	rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
	if (!rvu->rvu_dbg.root) {
		dev_err(rvu->dev, "%s failed\n", __func__);
		return;
	}
	pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
				    &rvu_dbg_rsrc_status_fops);
	if (!pfile)
		goto create_failed;

	rvu_dbg_npa_init(rvu);
	rvu_dbg_nix_init(rvu);
	rvu_dbg_cgx_init(rvu);
	rvu_dbg_npc_init(rvu);

	return;

create_failed:
	dev_err(dev, "Failed to create debugfs dir\n");
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
1705 
/* Tear down the whole AF debugfs hierarchy (safe if init failed:
 * debugfs_remove_recursive() ignores NULL/error dentries).
 */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
1710 
1711 #endif /* CONFIG_DEBUG_FS */
1712