1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2019 Marvell.
5 *
6 */
7
8 #ifdef CONFIG_DEBUG_FS
9
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23
24 #define DEBUGFS_DIR_NAME "octeontx2"
25
/* Indices into the cgx_rx_stats_fields[] / cgx_tx_stats_fields[] label
 * tables below; values must stay dense and zero-based.
 */
enum {
	CGX_STAT0 = 0,
	CGX_STAT1 = 1,
	CGX_STAT2 = 2,
	CGX_STAT3 = 3,
	CGX_STAT4 = 4,
	CGX_STAT5 = 5,
	CGX_STAT6 = 6,
	CGX_STAT7 = 7,
	CGX_STAT8 = 8,
	CGX_STAT9 = 9,
	CGX_STAT10 = 10,
	CGX_STAT11 = 11,
	CGX_STAT12 = 12,
	CGX_STAT13 = 13,
	CGX_STAT14 = 14,
	CGX_STAT15 = 15,
	CGX_STAT16 = 16,
	CGX_STAT17 = 17,
	CGX_STAT18 = 18,
};
47
/* NIX per-LF TX statistic register indices (hardware-defined order) */
enum nix_stat_lf_tx {
	TX_UCAST,		/* 0x0 */
	TX_BCAST,		/* 0x1 */
	TX_MCAST,		/* 0x2 */
	TX_DROP,		/* 0x3 */
	TX_OCTS,		/* 0x4 */
	TX_STATS_ENUM_LAST,
};
57
/* NIX per-LF RX statistic register indices (hardware-defined order) */
enum nix_stat_lf_rx {
	RX_OCTS,		/* 0x0 */
	RX_UCAST,		/* 0x1 */
	RX_BCAST,		/* 0x2 */
	RX_MCAST,		/* 0x3 */
	RX_DROP,		/* 0x4 */
	RX_DROP_OCTS,		/* 0x5 */
	RX_FCS,			/* 0x6 */
	RX_ERR,			/* 0x7 */
	RX_DRP_BCAST,		/* 0x8 */
	RX_DRP_MCAST,		/* 0x9 */
	RX_DRP_L3BCAST,		/* 0xa */
	RX_DRP_L3MCAST,		/* 0xb */
	RX_STATS_ENUM_LAST,
};
74
/* Human-readable labels for CGX RX stats, indexed by the CGX_STAT* enum.
 * Printed verbatim to debugfs; do not reword without checking consumers.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
90
/* Human-readable labels for CGX TX stats, indexed by the CGX_STAT* enum.
 * Printed verbatim to debugfs; do not reword without checking consumers.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
111
/* Human-readable labels for RPM RX stats; positional (index == stat id).
 * Fixed two garbled labels: "with out error" -> "without error" and
 * "a1nrange" -> "aInRange" (the IEEE 802.3 aInRangeLengthErrors counter).
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with aInRange length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
157
/* Human-readable labels for RPM TX stats; positional (index == stat id).
 * Printed verbatim to debugfs; do not reword without checking consumers.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
194
/* CPT engine group types as encoded by hardware (values are 1-based;
 * AE/SE/IE naming follows the CPT block — TODO confirm exact expansion).
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
200
/* Token-paste helpers: passing "NULL" as read_op/write_op to the FOPS
 * macros below expands to rvu_dbg_NULL / rvu_dbg_open_NULL, i.e. a NULL
 * handler in the resulting file_operations.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Define a seq_file-based file_operations (rvu_dbg_<name>_fops) whose
 * open handler wires single_open() to rvu_dbg_<read_op>; the debugfs
 * inode's i_private becomes seq_file->private.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Define a raw (non-seq_file) file_operations for handlers that manage
 * their own buffering/copy_to_user.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
225
226 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
227
/* Dump per-LMAC (port) MCS statistics for @dir (MCS_RX or MCS_TX).
 * seq_file private data is the struct mcs set at debugfs file creation.
 */
static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_port_stats stats;
	int lmac;

	seq_puts(filp, "\n port stats\n");
	/* stats_lock serializes against concurrent stats readers/updaters */
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
		mcs_get_port_stats(mcs, &stats, lmac, dir);
		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);

		/* Preempt-error counter is only meaningful on RX when the
		 * hardware has more than one MCS block.
		 */
		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
				   stats.preempt_err_cnt);
		if (dir == MCS_TX)
			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
				   stats.sectag_insert_err_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
251
/* debugfs "rx_stats/port" show handler: per-port RX MCS stats */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
258
/* debugfs "tx_stats/port" show handler: per-port TX MCS stats */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
265
/* Dump per-SA (security association) MCS stats for @dir, iterating only
 * SAs currently allocated in the tx/rx SA resource bitmap.
 *
 * NOTE(review): the "\n TX/RX SA stats\n" header is printed inside the
 * loop, i.e. once per SA — possibly intended to be printed once; verify
 * before changing the output format.
 */
static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_sa_stats stats;
	struct rsrc_bmap *map;
	int sa_id;

	if (dir == MCS_TX) {
		map = &mcs->tx.sa;
		mutex_lock(&mcs->stats_lock);
		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
			seq_puts(filp, "\n TX SA stats\n");
			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
				   stats.pkt_encrypt_cnt);

			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
				   stats.pkt_protected_cnt);
		}
		mutex_unlock(&mcs->stats_lock);
		return 0;
	}

	/* RX stats */
	map = &mcs->rx.sa;
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
		seq_puts(filp, "\n RX SA stats\n");
		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
304
/* debugfs "rx_stats/sa" show handler: per-SA RX MCS stats */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
311
/* debugfs "tx_stats/sa" show handler: per-SA TX MCS stats */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
318
/* Dump per-SC (secure channel) TX MCS stats for all allocated TX SCs.
 * Octet counters are only exposed on single-MCS-block hardware.
 */
static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->tx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);

		/* Octet-level counters exist only on single-block MCS */
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
				   stats.octet_encrypt_cnt);
			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
				   stats.octet_protected_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
348
/* Dump per-SC (secure channel) RX MCS stats for all allocated RX SCs.
 * Some counters are block-count dependent: delay/ok counters only on
 * multi-block hardware, octet counters only on single-block hardware.
 */
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
385
rvu_dbg_mcs_flowid_stats_display(struct seq_file * filp,void * unused,int dir)386 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
387 {
388 struct mcs *mcs = filp->private;
389 struct mcs_flowid_stats stats;
390 struct rsrc_bmap *map;
391 int flow_id;
392
393 seq_puts(filp, "\n Flowid stats\n");
394
395 if (dir == MCS_RX)
396 map = &mcs->rx.flow_ids;
397 else
398 map = &mcs->tx.flow_ids;
399
400 mutex_lock(&mcs->stats_lock);
401 for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
402 mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
403 seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
404 }
405 mutex_unlock(&mcs->stats_lock);
406 return 0;
407 }
408
/* debugfs "tx_stats/flowid" show handler: TX flowid TCAM hit counters */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
415
/* debugfs "rx_stats/flowid" show handler: RX flowid TCAM hit counters */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
422
/* Dump per-secy (secure entity) TX MCS stats for all allocated TX secys.
 *
 * NOTE(review): the "Pkts on active sa" label prints pkt_noactivesa_cnt —
 * the label likely should read "no active sa"; verify before changing the
 * user-visible string.
 */
static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
465
/* Dump per-secy (secure entity) RX MCS stats for all allocated RX secys.
 * The "pkts notag" counter is only exposed on multi-block MCS hardware.
 */
static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
			   stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
518
/* Create the MCS debugfs hierarchy:
 *   <root>/mcs/mcs<N>/{rx_stats,tx_stats}/{flowid,secy,sc,sa,port}
 * one subtree per MCS block. Each file's private data is the block's
 * struct mcs, consumed by the *_stats_display handlers above.
 * No-op when the SoC has no MCS blocks.
 */
static void rvu_dbg_mcs_init(struct rvu *rvu)
{
	struct mcs *mcs;
	char dname[10];
	int i;

	if (!rvu->mcs_blk_cnt)
		return;

	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);

	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
		mcs = mcs_get_pdata(i);

		sprintf(dname, "mcs%d", i);
		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
						      rvu->rvu_dbg.mcs_root);

		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_port_stats_fops);

		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_port_stats_fops);
	}
}
572
573 #define LMT_MAPTBL_ENTRY_SIZE 16
574 /* Dump LMTST map table */
rvu_dbg_lmtst_map_table_display(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)575 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
576 char __user *buffer,
577 size_t count, loff_t *ppos)
578 {
579 struct rvu *rvu = filp->private_data;
580 u64 lmt_addr, val, tbl_base;
581 int pf, vf, num_vfs, hw_vfs;
582 void __iomem *lmt_map_base;
583 int apr_pfs, apr_vfs;
584 int buf_size = 10240;
585 size_t off = 0;
586 int index = 0;
587 char *buf;
588 int ret;
589
590 /* don't allow partial reads */
591 if (*ppos != 0)
592 return 0;
593
594 buf = kzalloc(buf_size, GFP_KERNEL);
595 if (!buf)
596 return -ENOMEM;
597
598 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
599 val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
600 apr_vfs = 1 << (val & 0xF);
601 apr_pfs = 1 << ((val >> 4) & 0x7);
602
603 lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
604 LMT_MAPTBL_ENTRY_SIZE);
605 if (!lmt_map_base) {
606 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
607 kfree(buf);
608 return false;
609 }
610
611 off += scnprintf(&buf[off], buf_size - 1 - off,
612 "\n\t\t\t\t\tLmtst Map Table Entries");
613 off += scnprintf(&buf[off], buf_size - 1 - off,
614 "\n\t\t\t\t\t=======================");
615 off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
616 off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
617 off += scnprintf(&buf[off], buf_size - 1 - off,
618 "Lmtline Base (word 0)\t\t");
619 off += scnprintf(&buf[off], buf_size - 1 - off,
620 "Lmt Map Entry (word 1)");
621 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
622 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
623 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
624 pf);
625
626 index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
627 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
628 (tbl_base + index));
629 lmt_addr = readq(lmt_map_base + index);
630 off += scnprintf(&buf[off], buf_size - 1 - off,
631 " 0x%016llx\t\t", lmt_addr);
632 index += 8;
633 val = readq(lmt_map_base + index);
634 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
635 val);
636 /* Reading num of VFs per PF */
637 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
638 for (vf = 0; vf < num_vfs; vf++) {
639 index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
640 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
641 off += scnprintf(&buf[off], buf_size - 1 - off,
642 "PF%d:VF%d \t\t", pf, vf);
643 off += scnprintf(&buf[off], buf_size - 1 - off,
644 " 0x%llx\t\t", (tbl_base + index));
645 lmt_addr = readq(lmt_map_base + index);
646 off += scnprintf(&buf[off], buf_size - 1 - off,
647 " 0x%016llx\t\t", lmt_addr);
648 index += 8;
649 val = readq(lmt_map_base + index);
650 off += scnprintf(&buf[off], buf_size - 1 - off,
651 " 0x%016llx\n", val);
652 }
653 }
654 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
655
656 ret = min(off, count);
657 if (copy_to_user(buffer, buf, ret))
658 ret = -EFAULT;
659 kfree(buf);
660
661 iounmap(lmt_map_base);
662 if (ret < 0)
663 return ret;
664
665 *ppos = ret;
666 return ret;
667 }
668
669 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
670
/* Build into @lfs a compressed, comma-separated list of the LFs in
 * @block owned by @pcifunc, collapsing consecutive LFs into "a-b"
 * ranges (e.g. "0-3,7,9-10"). @lfs must be large enough for the result
 * (see get_max_column_width()).
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts out-of-range so the first match never extends a run */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		/* Consecutive LF: extend the current run, flush later */
		if (lf == prev_lf + 1) {
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* Close a run that reached the end of the bitmap */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
704
/* Compute the widest LF-list string any (pcifunc, block) pair produces,
 * so the rsrc_attach table columns can be aligned. Returns at least 12,
 * or -ENOMEM if the scratch buffer cannot be allocated.
 */
static int get_max_column_width(struct rvu *rvu)
{
	int index, pf, vf, lf_str_size = 12, buf_size = 256;
	struct rvu_block block;
	u16 pcifunc;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		/* vf == 0 denotes the PF itself; VF n is encoded as vf = n */
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;

				get_lf_str_list(block, pcifunc, buf);
				/* +1 for the terminating NUL */
				if (lf_str_size <= strlen(buf))
					lf_str_size = strlen(buf) + 1;
			}
		}
	}

	kfree(buf);
	return lf_str_size;
}
737
/* Dumps current provisioning status of all RVU block LFs:
 * one header row of block names, then one row per PF/VF listing the LFs
 * it owns in each block. Rows with no attached LFs are skipped.
 *
 * NOTE(review): each row is flushed with copy_to_user(buffer + (i * off))
 * where "off" is the CURRENT row's length — this destination offset is
 * only correct if all rows have identical length; *ppos accumulates the
 * true total. Verify against upstream before relying on multi-row output.
 */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0, i = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			  "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
	if (bytes_not_copied)
		goto out;

	i++;
	*ppos += off;
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		/* vf == 0 denotes the PF itself; VF n is encoded as vf = n+1 */
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(block, pcifunc, lfs);
				/* flag marks that this pcifunc owns >= 1 LF */
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			if (flag) {
				off +=	scnprintf(&buf[off],
						  buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
				if (bytes_not_copied)
					goto out;

				i++;
				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}

RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
843
/* Show the PCI-device / RVU-pcifunc / NIX-block / CGX(RPM) / LMAC
 * mapping for every CGX-mapped PF. PCI domain 2 and slot pf+1 are the
 * hardware's fixed enumeration for RVU PFs (assumption carried from the
 * original code — TODO confirm on non-default platforms).
 */
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
	struct rvu *rvu = filp->private;
	struct pci_dev *pdev = NULL;
	struct mac_ops *mac_ops;
	char cgx[10], lmac[10];
	struct rvu_pfvf *pfvf;
	int pf, domain, blkid;
	u8 cgx_id, lmac_id;
	u16 pcifunc;

	domain = 2;
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;
	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
		   mac_ops->name);
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
		if (!pdev)
			continue;

		cgx[0] = 0;
		lmac[0] = 0;
		pcifunc = pf << 10;
		pfvf = rvu_get_pfvf(rvu, pcifunc);

		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
			blkid = 0;
		else
			blkid = 1;

		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
				    &lmac_id);
		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
		sprintf(lmac, "LMAC%d", lmac_id);
		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);

		/* drop the reference taken by pci_get_domain_bus_and_slot() */
		pci_dev_put(pdev);
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
893
rvu_dbg_is_valid_lf(struct rvu * rvu,int blkaddr,int lf,u16 * pcifunc)894 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
895 u16 *pcifunc)
896 {
897 struct rvu_block *block;
898 struct rvu_hwinfo *hw;
899
900 hw = rvu->hw;
901 block = &hw->block[blkaddr];
902
903 if (lf < 0 || lf >= block->lf.max) {
904 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
905 block->lf.max - 1);
906 return false;
907 }
908
909 *pcifunc = block->fn_map[lf];
910 if (!*pcifunc) {
911 dev_warn(rvu->dev,
912 "This LF is not attached to any RVU PFFUNC\n");
913 return false;
914 }
915 return true;
916 }
917
print_npa_qsize(struct seq_file * m,struct rvu_pfvf * pfvf)918 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
919 {
920 char *buf;
921
922 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
923 if (!buf)
924 return;
925
926 if (!pfvf->aura_ctx) {
927 seq_puts(m, "Aura context is not initialized\n");
928 } else {
929 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
930 pfvf->aura_ctx->qsize);
931 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
932 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
933 }
934
935 if (!pfvf->pool_ctx) {
936 seq_puts(m, "Pool context is not initialized\n");
937 } else {
938 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
939 pfvf->pool_ctx->qsize);
940 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
941 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
942 }
943 kfree(buf);
944 }
945
/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 * For NIX the block address (NIX0 vs NIX1) is inferred from the debugfs
 * parent directory name; the LF to dump was stored in rvu_dbg by the
 * matching qsize write handler.
 */
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
				 int blktype)
{
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = filp->private;
	switch (blktype) {
	case BLKTYPE_NPA:
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		/* "nix0"/"nix1" parent dir name selects the NIX block */
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
				   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);

	return 0;
}
993
rvu_dbg_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int blktype)994 static ssize_t rvu_dbg_qsize_write(struct file *filp,
995 const char __user *buffer, size_t count,
996 loff_t *ppos, int blktype)
997 {
998 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
999 struct seq_file *seqfile = filp->private_data;
1000 char *cmd_buf, *cmd_buf_tmp, *subtoken;
1001 struct rvu *rvu = seqfile->private;
1002 struct dentry *current_dir;
1003 int blkaddr;
1004 u16 pcifunc;
1005 int ret, lf;
1006
1007 cmd_buf = memdup_user_nul(buffer, count);
1008 if (IS_ERR(cmd_buf))
1009 return -ENOMEM;
1010
1011 cmd_buf_tmp = strchr(cmd_buf, '\n');
1012 if (cmd_buf_tmp) {
1013 *cmd_buf_tmp = '\0';
1014 count = cmd_buf_tmp - cmd_buf + 1;
1015 }
1016
1017 cmd_buf_tmp = cmd_buf;
1018 subtoken = strsep(&cmd_buf, " ");
1019 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1020 if (cmd_buf)
1021 ret = -EINVAL;
1022
1023 if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1024 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1025 goto qsize_write_done;
1026 }
1027
1028 if (blktype == BLKTYPE_NPA) {
1029 blkaddr = BLKADDR_NPA;
1030 } else {
1031 current_dir = filp->f_path.dentry->d_parent;
1032 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
1033 BLKADDR_NIX1 : BLKADDR_NIX0);
1034 }
1035
1036 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1037 ret = -EINVAL;
1038 goto qsize_write_done;
1039 }
1040 if (blktype == BLKTYPE_NPA)
1041 rvu->rvu_dbg.npa_qsize_id = lf;
1042 else
1043 rvu->rvu_dbg.nix_qsize_id = lf;
1044
1045 qsize_write_done:
1046 kfree(cmd_buf_tmp);
1047 return ret ? ret : count;
1048 }
1049
rvu_dbg_npa_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1050 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1051 const char __user *buffer,
1052 size_t count, loff_t *ppos)
1053 {
1054 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1055 BLKTYPE_NPA);
1056 }
1057
rvu_dbg_npa_qsize_display(struct seq_file * filp,void * unused)1058 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1059 {
1060 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1061 }
1062
1063 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1064
/* Dumps given NPA Aura's context (from an NPA_AQ_ENQ read response).
 * The "Wn:" prefix names the 64-bit context word each field lives in.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is printed only on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is printed only on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1112
/* Dumps given NPA Pool's context (from an NPA_AQ_ENQ read response).
 * The "Wn:" prefix names the 64-bit context word each field lives in.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is printed only on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is printed only on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1159
/* Reads aura/pool's ctx from admin queue and dumps it to the seq_file.
 * Which LF and which aura/pool id (or "all") to dump was recorded
 * earlier by the matching debugfs write handler (write_npa_ctx()).
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the selection stored by the write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ request for the chosen context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every id; otherwise restrict the loop to one id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;

		/* Skip if queue is uninitialized */
		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
			continue;

		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
1245
/* Validate and record which NPA LF and aura/pool id (or "all") the
 * matching debugfs read handler should dump.
 * Returns 0 on success or -EINVAL on invalid input.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
		break;
	case NPA_AQ_CTYPE_POOL:
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
		break;
	}

	/* For an unknown ctype max_id stays 0, so this also rejects it */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 max_id - 1);
		return -EINVAL;
	}

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;
	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1296
/* Copy the user's command into @cmd_buf and tokenize
 * "<npalf> [<id>|all]" into @npalf/@id/@all.  @cmd_buf must have room
 * for *count + 1 bytes; *count is trimmed to the bytes consumed when a
 * newline terminates the command.  Returns 0 on success or a negative
 * errno on copy/parse failure.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	char *newline;
	char *token;
	int rc;

	if (copy_from_user(cmd_buf, buffer, *count))
		return -EFAULT;

	cmd_buf[*count] = '\0';

	/* Stop parsing at the first newline, if any */
	newline = strchr(cmd_buf, '\n');
	if (newline) {
		*newline = '\0';
		*count = newline - cmd_buf + 1;
	}

	token = strsep(&cmd_buf, " ");
	rc = token ? kstrtoint(token, 10, npalf) : -EINVAL;
	if (rc < 0)
		return rc;

	token = strsep(&cmd_buf, " ");
	if (token && strcmp(token, "all") == 0) {
		*all = true;
	} else {
		rc = token ? kstrtoint(token, 10, id) : -EINVAL;
		if (rc < 0)
			return rc;
	}

	/* Any leftover tokens make the command invalid */
	return cmd_buf ? -EINVAL : rc;
}
1334
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1335 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1336 const char __user *buffer,
1337 size_t count, loff_t *ppos, int ctype)
1338 {
1339 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1340 "aura" : "pool";
1341 struct seq_file *seqfp = filp->private_data;
1342 struct rvu *rvu = seqfp->private;
1343 int npalf, id = 0, ret;
1344 bool all = false;
1345
1346 if ((*ppos != 0) || !count)
1347 return -EINVAL;
1348
1349 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1350 if (!cmd_buf)
1351 return count;
1352 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1353 &npalf, &id, &all);
1354 if (ret < 0) {
1355 dev_info(rvu->dev,
1356 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1357 ctype_string, ctype_string);
1358 goto done;
1359 } else {
1360 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1361 }
1362 done:
1363 kfree(cmd_buf);
1364 return ret ? ret : count;
1365 }
1366
rvu_dbg_npa_aura_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1367 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1368 const char __user *buffer,
1369 size_t count, loff_t *ppos)
1370 {
1371 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1372 NPA_AQ_CTYPE_AURA);
1373 }
1374
rvu_dbg_npa_aura_ctx_display(struct seq_file * filp,void * unused)1375 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1376 {
1377 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1378 }
1379
1380 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1381
rvu_dbg_npa_pool_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1382 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1383 const char __user *buffer,
1384 size_t count, loff_t *ppos)
1385 {
1386 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1387 NPA_AQ_CTYPE_POOL);
1388 }
1389
rvu_dbg_npa_pool_ctx_display(struct seq_file * filp,void * unused)1390 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1391 {
1392 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1393 }
1394
1395 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1396
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)1397 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1398 int ctype, int transaction)
1399 {
1400 u64 req, out_req, lat, cant_alloc;
1401 struct nix_hw *nix_hw;
1402 struct rvu *rvu;
1403 int port;
1404
1405 if (blk_addr == BLKADDR_NDC_NPA0) {
1406 rvu = s->private;
1407 } else {
1408 nix_hw = s->private;
1409 rvu = nix_hw->rvu;
1410 }
1411
1412 for (port = 0; port < NDC_MAX_PORT; port++) {
1413 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1414 (port, ctype, transaction));
1415 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1416 (port, ctype, transaction));
1417 out_req = rvu_read64(rvu, blk_addr,
1418 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1419 (port, ctype, transaction));
1420 cant_alloc = rvu_read64(rvu, blk_addr,
1421 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1422 (port, transaction));
1423 seq_printf(s, "\nPort:%d\n", port);
1424 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1425 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1426 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1427 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1428 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1429 }
1430 }
1431
ndc_blk_cache_stats(struct seq_file * s,int idx,int blk_addr)1432 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1433 {
1434 seq_puts(s, "\n***** CACHE mode read stats *****\n");
1435 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1436 seq_puts(s, "\n***** CACHE mode write stats *****\n");
1437 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1438 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1439 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1440 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1441 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1442 return 0;
1443 }
1444
rvu_dbg_npa_ndc_cache_display(struct seq_file * filp,void * unused)1445 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1446 {
1447 return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1448 }
1449
1450 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1451
ndc_blk_hits_miss_stats(struct seq_file * s,int idx,int blk_addr)1452 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1453 {
1454 struct nix_hw *nix_hw;
1455 struct rvu *rvu;
1456 int bank, max_bank;
1457 u64 ndc_af_const;
1458
1459 if (blk_addr == BLKADDR_NDC_NPA0) {
1460 rvu = s->private;
1461 } else {
1462 nix_hw = s->private;
1463 rvu = nix_hw->rvu;
1464 }
1465
1466 ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1467 max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1468 for (bank = 0; bank < max_bank; bank++) {
1469 seq_printf(s, "BANK:%d\n", bank);
1470 seq_printf(s, "\tHits:\t%lld\n",
1471 (u64)rvu_read64(rvu, blk_addr,
1472 NDC_AF_BANKX_HIT_PC(bank)));
1473 seq_printf(s, "\tMiss:\t%lld\n",
1474 (u64)rvu_read64(rvu, blk_addr,
1475 NDC_AF_BANKX_MISS_PC(bank)));
1476 }
1477 return 0;
1478 }
1479
rvu_dbg_nix_ndc_rx_cache_display(struct seq_file * filp,void * unused)1480 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1481 {
1482 struct nix_hw *nix_hw = filp->private;
1483 int blkaddr = 0;
1484 int ndc_idx = 0;
1485
1486 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1487 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1488 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1489
1490 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1491 }
1492
1493 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1494
rvu_dbg_nix_ndc_tx_cache_display(struct seq_file * filp,void * unused)1495 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1496 {
1497 struct nix_hw *nix_hw = filp->private;
1498 int blkaddr = 0;
1499 int ndc_idx = 0;
1500
1501 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1502 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1503 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1504
1505 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1506 }
1507
1508 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1509
rvu_dbg_npa_ndc_hits_miss_display(struct seq_file * filp,void * unused)1510 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1511 void *unused)
1512 {
1513 return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1514 }
1515
1516 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1517
rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file * filp,void * unused)1518 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1519 void *unused)
1520 {
1521 struct nix_hw *nix_hw = filp->private;
1522 int ndc_idx = NPA0_U;
1523 int blkaddr = 0;
1524
1525 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1526 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1527
1528 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1529 }
1530
1531 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1532
rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file * filp,void * unused)1533 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1534 void *unused)
1535 {
1536 struct nix_hw *nix_hw = filp->private;
1537 int ndc_idx = NPA0_U;
1538 int blkaddr = 0;
1539
1540 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1541 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1542
1543 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1544 }
1545
1546 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1547
print_nix_cn10k_sq_ctx(struct seq_file * m,struct nix_cn10k_sq_ctx_s * sq_ctx)1548 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1549 struct nix_cn10k_sq_ctx_s *sq_ctx)
1550 {
1551 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1552 sq_ctx->ena, sq_ctx->qint_idx);
1553 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1554 sq_ctx->substream, sq_ctx->sdp_mcast);
1555 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1556 sq_ctx->cq, sq_ctx->sqe_way_mask);
1557
1558 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1559 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1560 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1561 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1562 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1563 sq_ctx->default_chan, sq_ctx->sqb_count);
1564
1565 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1566 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1567 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1568 sq_ctx->sqb_aura, sq_ctx->sq_int);
1569 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1570 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1571
1572 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1573 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1574 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1575 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1576 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1577 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1578 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1579 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1580 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1581 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1582
1583 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1584 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1585 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1586 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1587 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1588 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1589 sq_ctx->smenq_next_sqb);
1590
1591 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1592
1593 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1594 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1595 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1596 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1597 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1598 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1599 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1600
1601 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1602 (u64)sq_ctx->scm_lso_rem);
1603 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1604 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1605 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1606 (u64)sq_ctx->dropped_octs);
1607 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1608 (u64)sq_ctx->dropped_pkts);
1609 }
1610
/* Dumps given nix_sq's context word by word ("Wn:" names the 64-bit
 * context word each field lives in).  On CN10K-class silicon the
 * layout differs, so dumping is delegated to print_nix_cn10k_sq_ctx().
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OcteonTx2 silicon uses the CN10K context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* Words 4-8 hold SQB (SQ buffer) pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* Words 10-15 are statistics counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1680
print_nix_cn10k_rq_ctx(struct seq_file * m,struct nix_cn10k_rq_ctx_s * rq_ctx)1681 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1682 struct nix_cn10k_rq_ctx_s *rq_ctx)
1683 {
1684 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1685 rq_ctx->ena, rq_ctx->sso_ena);
1686 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1687 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1688 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1689 rq_ctx->cq, rq_ctx->lenerr_dis);
1690 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1691 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1692 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1693 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1694 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1695 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1696 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1697
1698 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1699 rq_ctx->spb_aura, rq_ctx->lpb_aura);
1700 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1701 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1702 rq_ctx->sso_grp, rq_ctx->sso_tt);
1703 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1704 rq_ctx->pb_caching, rq_ctx->wqe_caching);
1705 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1706 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1707 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1708 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1709 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1710 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1711
1712 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1713 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1714 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1715 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1716 rq_ctx->wqe_skip, rq_ctx->spb_ena);
1717 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1718 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1719 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1720 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1721 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1722 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1723
1724 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1725 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1726 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1727 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1728 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1729 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1730 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1731 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1732
1733 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1734 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1735 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1736 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1737 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1738 rq_ctx->rq_int, rq_ctx->rq_int_ena);
1739 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1740
1741 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1742 rq_ctx->ltag, rq_ctx->good_utag);
1743 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1744 rq_ctx->bad_utag, rq_ctx->flow_tagw);
1745 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1746 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1747 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1748 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1749 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1750
1751 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1752 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1753 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1754 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1755 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1756 }
1757
/* Dumps given nix_rq's context (OTx2 layout) to the debugfs seq_file.
 * On CN10K silicon the RQ context layout differs, so the CN10K-specific
 * dumper is used instead; @rsp holds the context read back via the AQ.
 * Each "Wn:" prefix names the 64-bit context word the field lives in.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OTx2 (CN10K) uses a different context structure */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	/* W0: basic queue config and enables */
	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	/* W1: drop/caching knobs and buffer auras */
	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	/* W2: WQE/buffer layout (skips and sizes) */
	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* W3: backpressure pass/drop thresholds */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	/* W4: interrupt state and LPB thresholds */
	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	/* W5: flow tagging */
	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10: HW-maintained statistics */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1826
/* Dumps given nix_cq's context to the debugfs seq_file.
 * @rsp holds the CQ context read back through the NIX admin queue;
 * "Wn:" prefixes name the 64-bit context word a field belongs to.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	/* W0: CQ ring base IOVA */
	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	/* W1: write pointer, averaging and interrupt mapping */
	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	/* W2: ring head/tail and averaging state */
	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	/* W3: error interrupt, sizing, caching, drop/backpressure levels */
	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1857
rvu_dbg_nix_queue_ctx_display(struct seq_file * filp,void * unused,int ctype)1858 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1859 void *unused, int ctype)
1860 {
1861 void (*print_nix_ctx)(struct seq_file *filp,
1862 struct nix_aq_enq_rsp *rsp) = NULL;
1863 struct nix_hw *nix_hw = filp->private;
1864 struct rvu *rvu = nix_hw->rvu;
1865 struct nix_aq_enq_req aq_req;
1866 struct nix_aq_enq_rsp rsp;
1867 char *ctype_string = NULL;
1868 int qidx, rc, max_id = 0;
1869 struct rvu_pfvf *pfvf;
1870 int nixlf, id, all;
1871 u16 pcifunc;
1872
1873 switch (ctype) {
1874 case NIX_AQ_CTYPE_CQ:
1875 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1876 id = rvu->rvu_dbg.nix_cq_ctx.id;
1877 all = rvu->rvu_dbg.nix_cq_ctx.all;
1878 break;
1879
1880 case NIX_AQ_CTYPE_SQ:
1881 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1882 id = rvu->rvu_dbg.nix_sq_ctx.id;
1883 all = rvu->rvu_dbg.nix_sq_ctx.all;
1884 break;
1885
1886 case NIX_AQ_CTYPE_RQ:
1887 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1888 id = rvu->rvu_dbg.nix_rq_ctx.id;
1889 all = rvu->rvu_dbg.nix_rq_ctx.all;
1890 break;
1891
1892 default:
1893 return -EINVAL;
1894 }
1895
1896 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1897 return -EINVAL;
1898
1899 pfvf = rvu_get_pfvf(rvu, pcifunc);
1900 if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1901 seq_puts(filp, "SQ context is not initialized\n");
1902 return -EINVAL;
1903 } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1904 seq_puts(filp, "RQ context is not initialized\n");
1905 return -EINVAL;
1906 } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1907 seq_puts(filp, "CQ context is not initialized\n");
1908 return -EINVAL;
1909 }
1910
1911 if (ctype == NIX_AQ_CTYPE_SQ) {
1912 max_id = pfvf->sq_ctx->qsize;
1913 ctype_string = "sq";
1914 print_nix_ctx = print_nix_sq_ctx;
1915 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1916 max_id = pfvf->rq_ctx->qsize;
1917 ctype_string = "rq";
1918 print_nix_ctx = print_nix_rq_ctx;
1919 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1920 max_id = pfvf->cq_ctx->qsize;
1921 ctype_string = "cq";
1922 print_nix_ctx = print_nix_cq_ctx;
1923 }
1924
1925 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1926 aq_req.hdr.pcifunc = pcifunc;
1927 aq_req.ctype = ctype;
1928 aq_req.op = NIX_AQ_INSTOP_READ;
1929 if (all)
1930 id = 0;
1931 else
1932 max_id = id + 1;
1933 for (qidx = id; qidx < max_id; qidx++) {
1934 aq_req.qidx = qidx;
1935 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1936 ctype_string, nixlf, aq_req.qidx);
1937 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1938 if (rc) {
1939 seq_puts(filp, "Failed to read the context\n");
1940 return -EINVAL;
1941 }
1942 print_nix_ctx(filp, &rsp);
1943 }
1944 return 0;
1945 }
1946
/* Validates a queue-context selection written via debugfs and stores it
 * in rvu_dbg so the matching read handler knows what to dump.
 * Returns 0 on success, -EINVAL for a bad LF, uninitialized context,
 * out-of-range queue id, or unknown @ctype.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int qcount = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Context must have been allocated for this queue type */
	switch (ctype) {
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		qcount = pfvf->sq_ctx->qsize;
		break;
	case NIX_AQ_CTYPE_RQ:
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		qcount = pfvf->rq_ctx->qsize;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		qcount = pfvf->cq_ctx->qsize;
		break;
	}

	if (id < 0 || id >= qcount) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, qcount - 1);
		return -EINVAL;
	}

	/* Remember the selection for the read side */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;
	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;
	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
2009
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)2010 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2011 const char __user *buffer,
2012 size_t count, loff_t *ppos,
2013 int ctype)
2014 {
2015 struct seq_file *m = filp->private_data;
2016 struct nix_hw *nix_hw = m->private;
2017 struct rvu *rvu = nix_hw->rvu;
2018 char *cmd_buf, *ctype_string;
2019 int nixlf, id = 0, ret;
2020 bool all = false;
2021
2022 if ((*ppos != 0) || !count)
2023 return -EINVAL;
2024
2025 switch (ctype) {
2026 case NIX_AQ_CTYPE_SQ:
2027 ctype_string = "sq";
2028 break;
2029 case NIX_AQ_CTYPE_RQ:
2030 ctype_string = "rq";
2031 break;
2032 case NIX_AQ_CTYPE_CQ:
2033 ctype_string = "cq";
2034 break;
2035 default:
2036 return -EINVAL;
2037 }
2038
2039 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2040
2041 if (!cmd_buf)
2042 return count;
2043
2044 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2045 &nixlf, &id, &all);
2046 if (ret < 0) {
2047 dev_info(rvu->dev,
2048 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2049 ctype_string, ctype_string);
2050 goto done;
2051 } else {
2052 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2053 ctype_string, m);
2054 }
2055 done:
2056 kfree(cmd_buf);
2057 return ret ? ret : count;
2058 }
2059
/* debugfs "sq_ctx" write handler: thin wrapper selecting the SQ type */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}
2067
/* debugfs "sq_ctx" read handler: thin wrapper selecting the SQ type */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
2072
2073 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2074
/* debugfs "rq_ctx" write handler: thin wrapper selecting the RQ type */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}
2082
/* debugfs "rq_ctx" read handler: thin wrapper selecting the RQ type */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}
2087
2088 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2089
/* debugfs "cq_ctx" write handler: thin wrapper selecting the CQ type */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}
2097
/* debugfs "cq_ctx" read handler: thin wrapper selecting the CQ type */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
2102
2103 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2104
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)2105 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2106 unsigned long *bmap, char *qtype)
2107 {
2108 char *buf;
2109
2110 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2111 if (!buf)
2112 return;
2113
2114 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2115 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2116 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2117 qtype, buf);
2118 kfree(buf);
2119 }
2120
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)2121 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2122 {
2123 if (!pfvf->cq_ctx)
2124 seq_puts(filp, "cq context is not initialized\n");
2125 else
2126 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2127 "cq");
2128
2129 if (!pfvf->rq_ctx)
2130 seq_puts(filp, "rq context is not initialized\n");
2131 else
2132 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2133 "rq");
2134
2135 if (!pfvf->sq_ctx)
2136 seq_puts(filp, "sq context is not initialized\n");
2137 else
2138 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2139 "sq");
2140 }
2141
/* debugfs "qsize" write handler: thin wrapper selecting the NIX block */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
2149
/* debugfs "qsize" read handler: thin wrapper selecting the NIX block */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
2154
2155 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2156
print_band_prof_ctx(struct seq_file * m,struct nix_bandprof_s * prof)2157 static void print_band_prof_ctx(struct seq_file *m,
2158 struct nix_bandprof_s *prof)
2159 {
2160 char *str;
2161
2162 switch (prof->pc_mode) {
2163 case NIX_RX_PC_MODE_VLAN:
2164 str = "VLAN";
2165 break;
2166 case NIX_RX_PC_MODE_DSCP:
2167 str = "DSCP";
2168 break;
2169 case NIX_RX_PC_MODE_GEN:
2170 str = "Generic";
2171 break;
2172 case NIX_RX_PC_MODE_RSVD:
2173 str = "Reserved";
2174 break;
2175 }
2176 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2177 str = (prof->icolor == 3) ? "Color blind" :
2178 (prof->icolor == 0) ? "Green" :
2179 (prof->icolor == 1) ? "Yellow" : "Red";
2180 seq_printf(m, "W0: icolor\t\t%s\n", str);
2181 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2182 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2183 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2184 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2185 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2186 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2187 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2188 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2189
2190 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2191 str = (prof->lmode == 0) ? "byte" : "packet";
2192 seq_printf(m, "W1: lmode\t\t%s\n", str);
2193 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2194 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2195 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2196 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2197 str = (prof->gc_action == 0) ? "PASS" :
2198 (prof->gc_action == 1) ? "DROP" : "RED";
2199 seq_printf(m, "W1: gc_action\t\t%s\n", str);
2200 str = (prof->yc_action == 0) ? "PASS" :
2201 (prof->yc_action == 1) ? "DROP" : "RED";
2202 seq_printf(m, "W1: yc_action\t\t%s\n", str);
2203 str = (prof->rc_action == 0) ? "PASS" :
2204 (prof->rc_action == 1) ? "DROP" : "RED";
2205 seq_printf(m, "W1: rc_action\t\t%s\n", str);
2206 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2207 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2208 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2209
2210 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2211 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2212 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2213 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2214 (u64)prof->green_pkt_pass);
2215 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2216 (u64)prof->yellow_pkt_pass);
2217 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2218 seq_printf(m, "W7: green_octs_pass\t%lld\n",
2219 (u64)prof->green_octs_pass);
2220 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2221 (u64)prof->yellow_octs_pass);
2222 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2223 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2224 (u64)prof->green_pkt_drop);
2225 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2226 (u64)prof->yellow_pkt_drop);
2227 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2228 seq_printf(m, "W13: green_octs_drop\t%lld\n",
2229 (u64)prof->green_octs_drop);
2230 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2231 (u64)prof->yellow_octs_drop);
2232 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2233 seq_puts(m, "==============================\n");
2234 }
2235
rvu_dbg_nix_band_prof_ctx_display(struct seq_file * m,void * unused)2236 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
2237 {
2238 struct nix_hw *nix_hw = m->private;
2239 struct nix_cn10k_aq_enq_req aq_req;
2240 struct nix_cn10k_aq_enq_rsp aq_rsp;
2241 struct rvu *rvu = nix_hw->rvu;
2242 struct nix_ipolicer *ipolicer;
2243 int layer, prof_idx, idx, rc;
2244 u16 pcifunc;
2245 char *str;
2246
2247 /* Ingress policers do not exist on all platforms */
2248 if (!nix_hw->ipolicer)
2249 return 0;
2250
2251 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2252 if (layer == BAND_PROF_INVAL_LAYER)
2253 continue;
2254 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2255 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
2256
2257 seq_printf(m, "\n%s bandwidth profiles\n", str);
2258 seq_puts(m, "=======================\n");
2259
2260 ipolicer = &nix_hw->ipolicer[layer];
2261
2262 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
2263 if (is_rsrc_free(&ipolicer->band_prof, idx))
2264 continue;
2265
2266 prof_idx = (idx & 0x3FFF) | (layer << 14);
2267 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
2268 0x00, NIX_AQ_CTYPE_BANDPROF,
2269 prof_idx);
2270 if (rc) {
2271 dev_err(rvu->dev,
2272 "%s: Failed to fetch context of %s profile %d, err %d\n",
2273 __func__, str, idx, rc);
2274 return 0;
2275 }
2276 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
2277 pcifunc = ipolicer->pfvf_map[idx];
2278 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2279 seq_printf(m, "Allocated to :: PF %d\n",
2280 rvu_get_pf(pcifunc));
2281 else
2282 seq_printf(m, "Allocated to :: PF %d VF %d\n",
2283 rvu_get_pf(pcifunc),
2284 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2285 print_band_prof_ctx(m, &aq_rsp.prof);
2286 }
2287 }
2288 return 0;
2289 }
2290
2291 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2292
rvu_dbg_nix_band_prof_rsrc_display(struct seq_file * m,void * unused)2293 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2294 {
2295 struct nix_hw *nix_hw = m->private;
2296 struct nix_ipolicer *ipolicer;
2297 int layer;
2298 char *str;
2299
2300 /* Ingress policers do not exist on all platforms */
2301 if (!nix_hw->ipolicer)
2302 return 0;
2303
2304 seq_puts(m, "\nBandwidth profile resource free count\n");
2305 seq_puts(m, "=====================================\n");
2306 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2307 if (layer == BAND_PROF_INVAL_LAYER)
2308 continue;
2309 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2310 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2311
2312 ipolicer = &nix_hw->ipolicer[layer];
2313 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
2314 ipolicer->band_prof.max,
2315 rvu_rsrc_free_count(&ipolicer->band_prof));
2316 }
2317 seq_puts(m, "=====================================\n");
2318
2319 return 0;
2320 }
2321
2322 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2323
rvu_dbg_nix_init(struct rvu * rvu,int blkaddr)2324 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2325 {
2326 struct nix_hw *nix_hw;
2327
2328 if (!is_block_implemented(rvu->hw, blkaddr))
2329 return;
2330
2331 if (blkaddr == BLKADDR_NIX0) {
2332 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2333 nix_hw = &rvu->hw->nix[0];
2334 } else {
2335 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2336 rvu->rvu_dbg.root);
2337 nix_hw = &rvu->hw->nix[1];
2338 }
2339
2340 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2341 &rvu_dbg_nix_sq_ctx_fops);
2342 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2343 &rvu_dbg_nix_rq_ctx_fops);
2344 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2345 &rvu_dbg_nix_cq_ctx_fops);
2346 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2347 &rvu_dbg_nix_ndc_tx_cache_fops);
2348 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2349 &rvu_dbg_nix_ndc_rx_cache_fops);
2350 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2351 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2352 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2353 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2354 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2355 &rvu_dbg_nix_qsize_fops);
2356 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2357 &rvu_dbg_nix_band_prof_ctx_fops);
2358 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2359 &rvu_dbg_nix_band_prof_rsrc_fops);
2360 }
2361
rvu_dbg_npa_init(struct rvu * rvu)2362 static void rvu_dbg_npa_init(struct rvu *rvu)
2363 {
2364 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2365
2366 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2367 &rvu_dbg_npa_qsize_fops);
2368 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2369 &rvu_dbg_npa_aura_ctx_fops);
2370 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2371 &rvu_dbg_npa_pool_ctx_fops);
2372 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2373 &rvu_dbg_npa_ndc_cache_fops);
2374 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2375 &rvu_dbg_npa_ndc_hits_miss_fops);
2376 }
2377
/* Reads the cumulative NIX RX stat @idx for (cgxd, lmac_id) and, on
 * success, prints it to seq_file 's' as "<name>: <count>".  The statement
 * expression evaluates to the raw counter value so callers can also
 * capture it.  Relies on 'err', 's', 'rvu', 'cgxd' and 'lmac_id' being
 * in scope at the expansion site; callers must check 'err' afterwards.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
					     NIX_STATS_RX, &(cnt));	\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})

/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: same contract, but the
 * counter is fetched from the NIX TX statistics.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
({								\
	u64 cnt;						\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
	if (!err)						\
		seq_printf(s, "%s: %llu\n", name, cnt);		\
	cnt;							\
})
2397
cgx_print_stats(struct seq_file * s,int lmac_id)2398 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2399 {
2400 struct cgx_link_user_info linfo;
2401 struct mac_ops *mac_ops;
2402 void *cgxd = s->private;
2403 u64 ucast, mcast, bcast;
2404 int stat = 0, err = 0;
2405 u64 tx_stat, rx_stat;
2406 struct rvu *rvu;
2407
2408 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2409 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2410 if (!rvu)
2411 return -ENODEV;
2412
2413 mac_ops = get_mac_ops(cgxd);
2414 /* There can be no CGX devices at all */
2415 if (!mac_ops)
2416 return 0;
2417
2418 /* Link status */
2419 seq_puts(s, "\n=======Link Status======\n\n");
2420 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2421 if (err)
2422 seq_puts(s, "Failed to read link status\n");
2423 seq_printf(s, "\nLink is %s %d Mbps\n\n",
2424 linfo.link_up ? "UP" : "DOWN", linfo.speed);
2425
2426 /* Rx stats */
2427 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2428 mac_ops->name);
2429 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2430 if (err)
2431 return err;
2432 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2433 if (err)
2434 return err;
2435 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2436 if (err)
2437 return err;
2438 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2439 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2440 if (err)
2441 return err;
2442 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2443 if (err)
2444 return err;
2445 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2446 if (err)
2447 return err;
2448
2449 /* Tx stats */
2450 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2451 mac_ops->name);
2452 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2453 if (err)
2454 return err;
2455 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2456 if (err)
2457 return err;
2458 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2459 if (err)
2460 return err;
2461 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2462 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2463 if (err)
2464 return err;
2465 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2466 if (err)
2467 return err;
2468
2469 /* Rx stats */
2470 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2471 while (stat < mac_ops->rx_stats_cnt) {
2472 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2473 if (err)
2474 return err;
2475 if (is_rvu_otx2(rvu))
2476 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2477 rx_stat);
2478 else
2479 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2480 rx_stat);
2481 stat++;
2482 }
2483
2484 /* Tx stats */
2485 stat = 0;
2486 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2487 while (stat < mac_ops->tx_stats_cnt) {
2488 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2489 if (err)
2490 return err;
2491
2492 if (is_rvu_otx2(rvu))
2493 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2494 tx_stat);
2495 else
2496 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2497 tx_stat);
2498 stat++;
2499 }
2500
2501 return err;
2502 }
2503
rvu_dbg_derive_lmacid(struct seq_file * filp,int * lmac_id)2504 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2505 {
2506 struct dentry *current_dir;
2507 char *buf;
2508
2509 current_dir = filp->file->f_path.dentry->d_parent;
2510 buf = strrchr(current_dir->d_name.name, 'c');
2511 if (!buf)
2512 return -EINVAL;
2513
2514 return kstrtoint(buf + 1, 10, lmac_id);
2515 }
2516
/* debugfs "stats" read handler: resolves the LMAC id from the directory
 * name and dumps that LMAC's statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}
2527
2528 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2529
cgx_print_dmac_flt(struct seq_file * s,int lmac_id)2530 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2531 {
2532 struct pci_dev *pdev = NULL;
2533 void *cgxd = s->private;
2534 char *bcast, *mcast;
2535 u16 index, domain;
2536 u8 dmac[ETH_ALEN];
2537 struct rvu *rvu;
2538 u64 cfg, mac;
2539 int pf;
2540
2541 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2542 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2543 if (!rvu)
2544 return -ENODEV;
2545
2546 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2547 domain = 2;
2548
2549 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2550 if (!pdev)
2551 return 0;
2552
2553 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2554 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2555 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2556
2557 seq_puts(s,
2558 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
2559 seq_printf(s, "%s PF%d %9s %9s",
2560 dev_name(&pdev->dev), pf, bcast, mcast);
2561 if (cfg & CGX_DMAC_CAM_ACCEPT)
2562 seq_printf(s, "%12s\n\n", "UNICAST");
2563 else
2564 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2565
2566 seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
2567
2568 for (index = 0 ; index < 32 ; index++) {
2569 cfg = cgx_read_dmac_entry(cgxd, index);
2570 /* Display enabled dmac entries associated with current lmac */
2571 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2572 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2573 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2574 u64_to_ether_addr(mac, dmac);
2575 seq_printf(s, "%7d %pM\n", index, dmac);
2576 }
2577 }
2578
2579 pci_dev_put(pdev);
2580 return 0;
2581 }
2582
/* debugfs "mac_filter" read handler: resolves the LMAC id from the
 * directory name and dumps that LMAC's DMAC filter table.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}
2593
2594 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2595
rvu_dbg_cgx_init(struct rvu * rvu)2596 static void rvu_dbg_cgx_init(struct rvu *rvu)
2597 {
2598 struct mac_ops *mac_ops;
2599 unsigned long lmac_bmap;
2600 int i, lmac_id;
2601 char dname[20];
2602 void *cgx;
2603
2604 if (!cgx_get_cgxcnt_max())
2605 return;
2606
2607 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2608 if (!mac_ops)
2609 return;
2610
2611 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2612 rvu->rvu_dbg.root);
2613
2614 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2615 cgx = rvu_cgx_pdata(i, rvu);
2616 if (!cgx)
2617 continue;
2618 lmac_bmap = cgx_get_lmac_bmap(cgx);
2619 /* cgx debugfs dir */
2620 sprintf(dname, "%s%d", mac_ops->name, i);
2621 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2622 rvu->rvu_dbg.cgx_root);
2623
2624 for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
2625 /* lmac debugfs dir */
2626 sprintf(dname, "lmac%d", lmac_id);
2627 rvu->rvu_dbg.lmac =
2628 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2629
2630 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2631 cgx, &rvu_dbg_cgx_stat_fops);
2632 debugfs_create_file("mac_filter", 0600,
2633 rvu->rvu_dbg.lmac, cgx,
2634 &rvu_dbg_cgx_dmac_flt_fops);
2635 }
2636 }
2637 }
2638
2639 /* NPC debugfs APIs */
rvu_print_npc_mcam_info(struct seq_file * s,u16 pcifunc,int blkaddr)2640 static void rvu_print_npc_mcam_info(struct seq_file *s,
2641 u16 pcifunc, int blkaddr)
2642 {
2643 struct rvu *rvu = s->private;
2644 int entry_acnt, entry_ecnt;
2645 int cntr_acnt, cntr_ecnt;
2646
2647 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2648 &entry_acnt, &entry_ecnt);
2649 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2650 &cntr_acnt, &cntr_ecnt);
2651 if (!entry_acnt && !cntr_acnt)
2652 return;
2653
2654 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2655 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2656 rvu_get_pf(pcifunc));
2657 else
2658 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2659 rvu_get_pf(pcifunc),
2660 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2661
2662 if (entry_acnt) {
2663 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2664 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2665 }
2666 if (cntr_acnt) {
2667 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2668 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2669 }
2670 }
2671
/* debugfs "mcam_info" read handler: prints MCAM key widths, total/
 * reserved/available entry and counter counts, then (under mcam->lock)
 * the per-PF/VF allocation breakdown.  Returns 0, or -ENODEV if the
 * NPC block is absent.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	/* Key-type field lives in bits [34:32] of the KEX config */
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	/* Lock protects the bitmap counts and per-function walk below */
	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* Nothing allocated yet - skip the per-function breakdown */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* NOTE(review): VF count assumed in bits [19:12] of
		 * RVU_PRIV_PFX_CFG - confirm against the HRM.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}
2736
2737 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2738
rvu_dbg_npc_rx_miss_stats_display(struct seq_file * filp,void * unused)2739 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2740 void *unused)
2741 {
2742 struct rvu *rvu = filp->private;
2743 struct npc_mcam *mcam;
2744 int blkaddr;
2745
2746 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2747 if (blkaddr < 0)
2748 return -ENODEV;
2749
2750 mcam = &rvu->hw->mcam;
2751
2752 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2753 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2754 rvu_read64(rvu, blkaddr,
2755 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2756
2757 return 0;
2758 }
2759
2760 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2761
/* Dump the match fields of one MCAM rule. Each bit set in
 * rule->features identifies a packet field the rule matches on; print
 * the field name followed by its value and mask in a field-appropriate
 * format (MAC, IPv4/IPv6 address, or hex/decimal). Port numbers, VLAN
 * TCIs, etype and SPI are stored in network byte order and converted
 * with ntohs()/ntohl() before printing.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_LXMB:
			/* rule->lxmb selects L2 multicast vs broadcast nibble */
			if (rule->lxmb == 1)
				seq_puts(s, "\tL2M nibble is set\n");
			else
				seq_puts(s, "\tL2B nibble is set\n");
			break;
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_INNER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_itci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_IPFRAG_IPV6:
			seq_printf(s, "0x%x ", rule->packet.next_header);
			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
			break;
		case NPC_IPFRAG_IPV4:
			seq_printf(s, "0x%x ", rule->packet.ip_flag);
			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		case NPC_IPSEC_SPI:
			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
			break;
		default:
			/* Field has no value worth decoding; name only */
			seq_puts(s, "\n");
			break;
		}
	}
}
2848
/* Describe the action an MCAM rule performs on match. TX-interface
 * rules carry a NIX TX action opcode, RX-interface rules a NIX RX
 * action opcode; unknown opcodes print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
2896
/* Map an NPC interface id to a printable name; "unknown" otherwise. */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2914
/* debugfs 'mcam_rules' handler: walk mcam->mcam_rules under mcam->lock
 * and, for each installed rule, print the installing PF/VF, direction
 * and interface, MCAM entry index, matched flow fields, forwarding
 * target and channel (RX only), the match action, enable state, and -
 * when a counter is attached - its hardware hit count.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC field means the owner is a VF (VF id = FUNC - 1) */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		if (is_npc_intf_rx(iter->intf)) {
			/* RX rules forward to a PF/VF on a specific channel */
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		/* Hit count comes straight from the hardware stat register */
		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2982
/* debugfs 'exact_entries' handler: dump the NPC exact-match tables.
 * First the multi-way hashed MEM table (one column per way, rows by
 * depth index), then the overflow CAM table. Both tables are walked
 * under table->lock.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	for (i = 0; i < table->mem_table.ways; i++) {
		/* Start each way's cursor at the head of its entry list.
		 * NOTE(review): the per-index walk below assumes each way's
		 * list is kept sorted by ->index - confirm at the insert path.
		 */
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan MAC \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		/* Bit j set => way j has an entry at depth index i */
		bitmap = 0;
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* Advance this way's cursor past the printed entry */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3069
/* debugfs 'exact_info' handler: print exact-match feature status,
 * per-drop-rule MAC filter counts and promiscuous state, and the
 * MEM/CAM table geometry (ways, depth, hash parameters).
 */
static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
{
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i;

	table = rvu->hw->table;

	seq_puts(s, "\n\tExact Table Info\n");
	/* NOTE(review): "disable" (vs "enabled") is inconsistent wording but
	 * is the existing debugfs output; left unchanged here.
	 */
	seq_printf(s, "Exact Match Feature : %s\n",
		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
	if (!rvu->hw->cap.npc_exact_match_enabled)
		return 0;

	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
	for (i = 0; i < table->num_drop_rules; i++)
		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);

	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
	for (i = 0; i < table->num_drop_rules; i++)
		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");

	seq_puts(s, "\n\tMEM Table Info\n");
	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);

	seq_puts(s, "\n\tCAM Table Info\n");
	seq_printf(s, "Depth : %d\n", table->cam_table.depth);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3106
rvu_dbg_npc_exact_drop_cnt(struct seq_file * s,void * unused)3107 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3108 {
3109 struct npc_exact_table *table;
3110 struct rvu *rvu = s->private;
3111 struct npc_key_field *field;
3112 u16 chan, pcifunc;
3113 int blkaddr, i;
3114 u64 cfg, cam1;
3115 char *str;
3116
3117 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3118 table = rvu->hw->table;
3119
3120 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3121
3122 seq_puts(s, "\n\t Exact Hit on drop status\n");
3123 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3124
3125 for (i = 0; i < table->num_drop_rules; i++) {
3126 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3127 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3128
3129 /* channel will be always in keyword 0 */
3130 cam1 = rvu_read64(rvu, blkaddr,
3131 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3132 chan = field->kw_mask[0] & cam1;
3133
3134 str = (cfg & 1) ? "enabled" : "disabled";
3135
3136 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3137 rvu_read64(rvu, blkaddr,
3138 NPC_AF_MATCH_STATX(table->counter_idx[i])),
3139 chan, str);
3140 }
3141
3142 return 0;
3143 }
3144
3145 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3146
rvu_dbg_npc_init(struct rvu * rvu)3147 static void rvu_dbg_npc_init(struct rvu *rvu)
3148 {
3149 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3150
3151 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3152 &rvu_dbg_npc_mcam_info_fops);
3153 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3154 &rvu_dbg_npc_mcam_rules_fops);
3155
3156 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3157 &rvu_dbg_npc_rx_miss_act_fops);
3158
3159 if (!rvu->hw->cap.npc_exact_match_enabled)
3160 return;
3161
3162 debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3163 &rvu_dbg_npc_exact_entries_fops);
3164
3165 debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3166 &rvu_dbg_npc_exact_info_fops);
3167
3168 debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3169 &rvu_dbg_npc_exact_drop_cnt_fops);
3170
3171 }
3172
/* Print FREE/BUSY status bitmaps for all CPT engines of one type.
 * Engine numbering is contiguous: SE engines first, then IE, then AE;
 * the per-type counts come from CPT_AF_CONSTANTS1. Bit i of each
 * bitmap corresponds to the i-th engine of the requested type.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 busy_map = 0, free_map = 0;
	u16 max_ses, max_ies, max_aes;
	u32 first, last, e, bit;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	switch (eng_type) {
	case CPT_SE_TYPE:
		first = 0;
		last = max_ses;
		break;
	case CPT_IE_TYPE:
		first = max_ses;
		last = max_ses + max_ies;
		break;
	case CPT_AE_TYPE:
		first = max_ses + max_ies;
		last = max_ses + max_ies + max_aes;
		break;
	default:
		return -EINVAL;
	}

	for (e = first, bit = 0; e < last; e++, bit++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)
			busy_map |= 1ULL << bit;

		if (reg & 0x2)
			free_map |= 1ULL << bit;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_map);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_map);

	return 0;
}
3218
/* debugfs 'cpt_ae_sts' handler: status bitmaps for AE (asymmetric) engines. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3225
/* debugfs 'cpt_se_sts' handler: status bitmaps for SE (symmetric) engines. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3232
/* debugfs 'cpt_ie_sts' handler: status bitmaps for IE (IPsec) engines. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3239
/* debugfs 'cpt_engines_info' handler: for every CPT engine (SE + IE +
 * AE, counts from CPT_AF_CONSTANTS1), print its group-enable mask,
 * active-info and control registers.
 */
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u32 e_max, e;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Engines of all three types are numbered contiguously */
	e_max = max_ses + max_ies + max_aes;

	seq_puts(filp, "===========================================\n");
	for (e = 0; e < e_max; e++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
			   reg & 0xff);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
			   reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
			   reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3273
/* debugfs 'cpt_lfs_info' handler: for every CPT local function, print
 * its CTL/CTL2/PTR_CTL registers and the block-level LF config.
 */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	/* No LF bitmap means the block's LFs were never initialized */
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
		/* Per-LF config lives at lfcfg_reg + (lf << lfshift) */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				(lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3306
/* debugfs 'cpt_err_info' handler: dump the CPT fault/interrupt and
 * error-info registers (FLT/PSN interrupts, RVU/RAS interrupts and
 * engine execution error info).
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3333
/* debugfs 'cpt_pc' handler: dump the CPT performance counters
 * (instruction requests/latency, NCB read requests/latency, UC-fill
 * reads, active cycles and clock count).
 */
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc         %llu\n", reg);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3360
rvu_dbg_cpt_init(struct rvu * rvu,int blkaddr)3361 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3362 {
3363 struct cpt_ctx *ctx;
3364
3365 if (!is_block_implemented(rvu->hw, blkaddr))
3366 return;
3367
3368 if (blkaddr == BLKADDR_CPT0) {
3369 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3370 ctx = &rvu->rvu_dbg.cpt_ctx[0];
3371 ctx->blkaddr = BLKADDR_CPT0;
3372 ctx->rvu = rvu;
3373 } else {
3374 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3375 rvu->rvu_dbg.root);
3376 ctx = &rvu->rvu_dbg.cpt_ctx[1];
3377 ctx->blkaddr = BLKADDR_CPT1;
3378 ctx->rvu = rvu;
3379 }
3380
3381 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3382 &rvu_dbg_cpt_pc_fops);
3383 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3384 &rvu_dbg_cpt_ae_sts_fops);
3385 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3386 &rvu_dbg_cpt_se_sts_fops);
3387 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3388 &rvu_dbg_cpt_ie_sts_fops);
3389 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3390 &rvu_dbg_cpt_engines_info_fops);
3391 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3392 &rvu_dbg_cpt_lfs_info_fops);
3393 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3394 &rvu_dbg_cpt_err_info_fops);
3395 }
3396
/* Root debugfs directory name: "octeontx2" on OcteonTX2 silicon,
 * "cn10k" otherwise.
 */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
3404
rvu_dbg_init(struct rvu * rvu)3405 void rvu_dbg_init(struct rvu *rvu)
3406 {
3407 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3408
3409 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3410 &rvu_dbg_rsrc_status_fops);
3411
3412 if (!is_rvu_otx2(rvu))
3413 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3414 rvu, &rvu_dbg_lmtst_map_table_fops);
3415
3416 if (!cgx_get_cgxcnt_max())
3417 goto create;
3418
3419 if (is_rvu_otx2(rvu))
3420 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3421 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3422 else
3423 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3424 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3425
3426 create:
3427 rvu_dbg_npa_init(rvu);
3428 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3429
3430 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3431 rvu_dbg_cgx_init(rvu);
3432 rvu_dbg_npc_init(rvu);
3433 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3434 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3435 rvu_dbg_mcs_init(rvu);
3436 }
3437
/* Tear down the entire debugfs hierarchy created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3442
3443 #endif /* CONFIG_DEBUG_FS */
3444