// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2019 Marvell.
 *
 */

#ifdef CONFIG_DEBUG_FS

#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "npc.h"
#include "rvu_npc_hash.h"
#include "mcs.h"

#define DEBUGFS_DIR_NAME "octeontx2"

enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};

static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};

static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Packets truncated due to transmit underflow",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
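/* RPM is the MAC block used on CN10K silicon (selected via mac_ops at
 * runtime when the chip is not OTX2).  Unlike the CGX tables above, the
 * RPM stat strings are implicitly indexed by hardware stat number,
 * starting at 0.
 */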
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with Frame Check Sequence errors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC (class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};

static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC (class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface",
};

enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};

#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL
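/* Boilerplate helpers for the debugfs entries below.  As an illustrative
 * (hypothetical) example, RVU_DEBUG_SEQ_FOPS(foo, foo_display, NULL)
 * expands to an open handler that calls
 * single_open(file, rvu_dbg_foo_display, inode->i_private) plus a
 * "rvu_dbg_foo_fops" seq_file based file_operations; the rvu_dbg_NULL
 * defines above allow NULL to be plugged in for an unused read/write op.
 */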
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner	 = THIS_MODULE, \
	.open	 = rvu_dbg_open_##name, \
	.read	 = seq_read, \
	.write	 = rvu_dbg_##write_op, \
	.llseek	 = seq_lseek, \
	.release = single_release, \
}

#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);

static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_port_stats stats;
	int lmac;

	seq_puts(filp, "\n port stats\n");
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
		mcs_get_port_stats(mcs, &stats, lmac, dir);
		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);

		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
				   stats.preempt_err_cnt);
		if (dir == MCS_TX)
			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
				   stats.sectag_insert_err_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);

static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);

static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_sa_stats stats;
	struct rsrc_bmap *map;
	int sa_id;

	if (dir == MCS_TX) {
		map = &mcs->tx.sa;
		mutex_lock(&mcs->stats_lock);
		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
			seq_puts(filp, "\n TX SA stats\n");
			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
				   stats.pkt_encrypt_cnt);

			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
				   stats.pkt_protected_cnt);
		}
		mutex_unlock(&mcs->stats_lock);
		return 0;
	}

	/* RX stats */
	map = &mcs->rx.sa;
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
		seq_puts(filp, "\n RX SA stats\n");
		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);

static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->tx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);

		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
				   stats.octet_encrypt_cnt);
			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
				   stats.octet_protected_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);

static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);

static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_flowid_stats stats;
	struct rsrc_bmap *map;
	int flow_id;

	seq_puts(filp, "\n Flowid stats\n");

	if (dir == MCS_RX)
		map = &mcs->rx.flow_ids;
	else
		map = &mcs->tx.flow_ids;

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);

static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);

static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octets encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: Octets protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		seq_printf(filp, "secy%d: Pkts with no active sa: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);

static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octets decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disabled port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no SA (sectag.tci.c=0): %lld\n", secy_id,
			   stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untagged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: Pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);

static void rvu_dbg_mcs_init(struct rvu *rvu)
{
	struct mcs *mcs;
	char dname[10];
	int i;

	if (!rvu->mcs_blk_cnt)
		return;

	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);

	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
		mcs = mcs_get_pdata(i);

		sprintf(dname, "mcs%d", i);
		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
						      rvu->rvu_dbg.mcs_root);

		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_port_stats_fops);

		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_port_stats_fops);
	}
}

#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
					       char __user *buffer,
					       size_t count, loff_t *ppos)
{
	struct rvu *rvu = filp->private_data;
	u64 lmt_addr, val, tbl_base;
	int pf, vf, num_vfs, hw_vfs;
	void __iomem *lmt_map_base;
	int buf_size = 10240;
	size_t off = 0;
	int index = 0;
	char *buf;
	int ret;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		kfree(buf);
		return -ENOMEM;
	}

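	/* Layout assumed by the dump below: each map table entry is
	 * LMT_MAPTBL_ENTRY_SIZE (16) bytes, with word 0 holding the LMT
	 * line base address and word 1 the map entry; a PF's entry is
	 * followed by one entry per VF under it.
	 */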
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "\n\t\t\t\t\tLmtst Map Table Entries");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "\n\t\t\t\t\t=======================");
	off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "Lmtline Base (word 0)\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "Lmt Map Entry (word 1)");
	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
				 pf);

		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
				 (tbl_base + index));
		lmt_addr = readq(lmt_map_base + index);
		off += scnprintf(&buf[off], buf_size - 1 - off,
				 " 0x%016llx\t\t", lmt_addr);
		index += 8;
		val = readq(lmt_map_base + index);
		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
				 val);
		/* Reading num of VFs per PF */
		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
		for (vf = 0; vf < num_vfs; vf++) {
			index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
				((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "PF%d:VF%d \t\t", pf, vf);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%llx\t\t", (tbl_base + index));
			lmt_addr = readq(lmt_map_base + index);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%016llx\t\t", lmt_addr);
			index += 8;
			val = readq(lmt_map_base + index);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%016llx\n", val);
		}
	}
	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");

	ret = min(off, count);
	if (copy_to_user(buffer, buf, ret))
		ret = -EFAULT;
	kfree(buf);

	iounmap(lmt_map_base);
	if (ret < 0)
		return ret;

	*ppos = ret;
	return ret;
}

RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);

static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			prev_lf = lf;
			seq = 1;
			continue;
		}

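		/* Current LF does not continue the previous run: if a run
		 * was open, close it and start a new element ("-<prev>,<lf>");
		 * otherwise append ",<lf>" (or just "<lf>" when the string
		 * is still empty).  E.g. LFs {0,1,2,7} are printed as
		 * "0-2,7".
		 */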
		if (seq)
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}

static int get_max_column_width(struct rvu *rvu)
{
	int index, pf, vf, lf_str_size = 12, buf_size = 256;
	struct rvu_block block;
	u16 pcifunc;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;

				get_lf_str_list(block, pcifunc, buf);
				if (lf_str_size <= strlen(buf))
					lf_str_size = strlen(buf) + 1;
			}
		}
	}

	kfree(buf);
	return lf_str_size;
}

/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);
	if (lf_str_size < 0) {
		kfree(buf);
		return lf_str_size;
	}

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			 "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	bytes_not_copied = copy_to_user(buffer + *ppos, buf, off);
	if (bytes_not_copied)
		goto out;

	*ppos += off;
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(block, pcifunc, lfs);
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			if (flag) {
				off += scnprintf(&buf[off],
						 buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer + *ppos,
								buf, off);
				if (bytes_not_copied)
					goto out;

				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}

RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
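/* Illustrative output of the PF-to-CGX/LMAC map below (device names,
 * pcifunc values and LMAC numbers depend on the platform):
 *
 * PCI dev		RVU PF Func	NIX block	cgx	LMAC
 * 0002:02:00.0	0x400		NIX0		cgx0	LMAC0
 */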
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
	struct rvu *rvu = filp->private;
	struct pci_dev *pdev = NULL;
	struct mac_ops *mac_ops;
	char cgx[10], lmac[10];
	struct rvu_pfvf *pfvf;
	int pf, domain, blkid;
	u8 cgx_id, lmac_id;
	u16 pcifunc;

	domain = 2;
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	/* There may be no CGX devices at all */
	if (!mac_ops)
		return 0;
	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
		   mac_ops->name);
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
		if (!pdev)
			continue;

		cgx[0] = 0;
		lmac[0] = 0;
		pcifunc = pf << 10;
		pfvf = rvu_get_pfvf(rvu, pcifunc);

		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
			blkid = 0;
		else
			blkid = 1;

		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
				    &lmac_id);
		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
		sprintf(lmac, "LMAC%d", lmac_id);
		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);

		pci_dev_put(pdev);
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);

static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
				u16 *pcifunc)
{
	struct rvu_block *block;
	struct rvu_hwinfo *hw;

	hw = rvu->hw;
	block = &hw->block[blkaddr];

	if (lf < 0 || lf >= block->lf.max) {
		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
			 block->lf.max - 1);
		return false;
	}

	*pcifunc = block->fn_map[lf];
	if (!*pcifunc) {
		dev_warn(rvu->dev,
			 "This LF is not attached to any RVU PFFUNC\n");
		return false;
	}
	return true;
}

static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
{
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	if (!pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
					pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
	}

	if (!pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
					pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
	}
	kfree(buf);
}

/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 */
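/* Example (illustrative, assuming debugfs is mounted at the usual place
 * and NPA LF 0 is provisioned):
 *   echo 0 > /sys/kernel/debug/octeontx2/npa/qsize
 *   cat /sys/kernel/debug/octeontx2/npa/qsize
 */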
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
				 int blktype)
{
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = filp->private;
	switch (blktype) {
	case BLKTYPE_NPA:
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
			   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);

	return 0;
}

static ssize_t rvu_dbg_qsize_write(struct file *filp,
				   const char __user *buffer, size_t count,
				   loff_t *ppos, int blktype)
{
	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
	struct seq_file *seqfile = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp, *subtoken;
	struct rvu *rvu = seqfile->private;
	struct dentry *current_dir;
	int blkaddr;
	u16 pcifunc;
	int ret, lf;

	/* Copies exactly 'count' bytes and NUL-terminates the result */
	cmd_buf = memdup_user_nul(buffer, count);
	if (IS_ERR(cmd_buf))
		return PTR_ERR(cmd_buf);

	cmd_buf_tmp = strchr(cmd_buf, '\n');
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		count = cmd_buf_tmp - cmd_buf + 1;
	}

	cmd_buf_tmp = cmd_buf;
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
	if (cmd_buf)
		ret = -EINVAL;

	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
		goto qsize_write_done;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		current_dir = filp->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
			   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
		ret = -EINVAL;
		goto qsize_write_done;
	}
	if (blktype == BLKTYPE_NPA)
		rvu->rvu_dbg.npa_qsize_id = lf;
	else
		rvu->rvu_dbg.nix_qsize_id = lf;

qsize_write_done:
	kfree(cmd_buf_tmp);
	return ret ? ret : count;
}

static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NPA);
}

static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);

/* Dumps given NPA Aura's context */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}

/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}

/* Reads aura/pool's ctx from admin queue */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}

static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NPA_AQ_CTYPE_AURA) {
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
	} else if (ctype == NPA_AQ_CTYPE_POOL) {
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 max_id - 1);
		return -EINVAL;
	}

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	if (cmd_buf)
		return -EINVAL;
	return ret;
}

static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
				     const char __user *buffer,
				     size_t count, loff_t *ppos, int ctype)
{
	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
					"aura" : "pool";
	struct seq_file *seqfp = filp->private_data;
	struct rvu *rvu = seqfp->private;
	int npalf, id = 0, ret;
	bool all = false;

	if ((*ppos != 0) || !count)
		return -EINVAL;

	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd_buf)
		return -ENOMEM;
	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
				   &npalf, &id, &all);
	if (ret < 0) {
		dev_info(rvu->dev,
			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
			 ctype_string, ctype_string);
		goto done;
	} else {
		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
	}
done:
	kfree(cmd_buf);
	return ret ? ret : count;
}

static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}

static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);

static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}

static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);

static void ndc_cache_stats(struct seq_file *s, int blk_addr,
			    int ctype, int transaction)
{
	u64 req, out_req, lat, cant_alloc;
	struct nix_hw *nix_hw;
	struct rvu *rvu;
	int port;

	if (blk_addr == BLKADDR_NDC_NPA0) {
		rvu = s->private;
	} else {
		nix_hw = s->private;
		rvu = nix_hw->rvu;
	}

	for (port = 0; port < NDC_MAX_PORT; port++) {
		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
						(port, ctype, transaction));
		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
						(port, ctype, transaction));
		out_req = rvu_read64(rvu, blk_addr,
				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
				     (port, ctype, transaction));
		cant_alloc = rvu_read64(rvu, blk_addr,
					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
					(port, transaction));
		seq_printf(s, "\nPort:%d\n", port);
		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
		/* Avoid a divide-by-zero when no requests were seen */
		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n",
			   req ? lat / req : 0);
		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
	}
}

static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
{
	seq_puts(s, "\n***** CACHE mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
	seq_puts(s, "\n***** CACHE mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
	return 0;
}

static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);

static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
	struct nix_hw *nix_hw;
	struct rvu *rvu;
	int bank, max_bank;
	u64 ndc_af_const;

	if (blk_addr == BLKADDR_NDC_NPA0) {
		rvu = s->private;
	} else {
		nix_hw = s->private;
		rvu = nix_hw->rvu;
	}

	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
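	/* NDC_AF_CONST advertises how many banks this NDC instance
	 * implements; walk each bank and report its hit/miss counters.
	 */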
	for (bank = 0; bank < max_bank; bank++) {
		seq_printf(s, "BANK:%d\n", bank);
		seq_printf(s, "\tHits:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
					   NDC_AF_BANKX_HIT_PC(bank)));
		seq_printf(s, "\tMiss:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
					   NDC_AF_BANKX_MISS_PC(bank)));
	}
	return 0;
}

static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int blkaddr = 0;
	int ndc_idx = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);

static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int blkaddr = 0;
	int ndc_idx = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);

static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);

static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);

static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);

static void print_nix_cn10k_sq_ctx(struct seq_file *m,
				   struct nix_cn10k_sq_ctx_s *sq_ctx)
{
	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
		   sq_ctx->ena, sq_ctx->qint_idx);
	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
		   sq_ctx->substream, sq_ctx->sdp_mcast);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
		   sq_ctx->cq, sq_ctx->sqe_way_mask);

	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
		   sq_ctx->default_chan, sq_ctx->sqb_count);

	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
		   sq_ctx->sqb_aura, sq_ctx->sq_int);
	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);

	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
	seq_printf(m, "W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
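/* The "W<n>: " prefixes used by these context dumps name the 64-bit word
 * of the hardware context structure (nix_sq_ctx_s and friends) that each
 * field is decoded from, so the output can be checked against the context
 * layout word by word.
 */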
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}

static void print_nix_cn10k_rq_ctx(struct seq_file *m,
				   struct nix_cn10k_rq_ctx_s *rq_ctx)
{
	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->lenerr_dis);
	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);

	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->sso_tt);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);

	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
	seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);

	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);

	seq_printf(m, "W4: lpb_aura_pass \t\t%d\nW4: lpb_aura_drop \t\t%d\n",
		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);

	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
		   rq_ctx->ltag, rq_ctx->good_utag);
	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);

	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
(u64)rq_ctx->pkts); 1745 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); 1746 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); 1747 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); 1748 } 1749 1750 /* Dumps given nix_rq's context */ 1751 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) 1752 { 1753 struct nix_rq_ctx_s *rq_ctx = &rsp->rq; 1754 struct nix_hw *nix_hw = m->private; 1755 struct rvu *rvu = nix_hw->rvu; 1756 1757 if (!is_rvu_otx2(rvu)) { 1758 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx); 1759 return; 1760 } 1761 1762 seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n", 1763 rq_ctx->wqe_aura, rq_ctx->substream); 1764 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", 1765 rq_ctx->cq, rq_ctx->ena_wqwd); 1766 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", 1767 rq_ctx->ipsech_ena, rq_ctx->sso_ena); 1768 seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena); 1769 1770 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", 1771 rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena); 1772 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n", 1773 rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching); 1774 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n", 1775 rq_ctx->pb_caching, rq_ctx->sso_tt); 1776 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", 1777 rq_ctx->sso_grp, rq_ctx->lpb_aura); 1778 seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura); 1779 1780 seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n", 1781 rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy); 1782 seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n", 1783 rq_ctx->xqe_imm_size, rq_ctx->later_skip); 1784 seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n", 1785 rq_ctx->first_skip, rq_ctx->lpb_sizem1); 1786 seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n", 1787 rq_ctx->spb_ena, rq_ctx->wqe_skip); 1788 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1); 1789 1790 seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n", 1791 rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop); 1792 seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n", 1793 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); 1794 seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n", 1795 rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop); 1796 seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n", 1797 rq_ctx->xqe_pass, rq_ctx->xqe_drop); 1798 1799 seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n", 1800 rq_ctx->qint_idx, rq_ctx->rq_int_ena); 1801 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n", 1802 rq_ctx->rq_int, rq_ctx->lpb_pool_pass); 1803 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n", 1804 rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass); 1805 seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop); 1806 1807 seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n", 1808 rq_ctx->flow_tagw, rq_ctx->bad_utag); 1809 seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n", 1810 rq_ctx->good_utag, rq_ctx->ltag); 1811 1812 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); 1813 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); 1814 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); 1815 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", 
(u64)rq_ctx->drop_pkts);
        seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}

/* Dumps given nix_cq's context */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
        struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

        seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

        seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
        seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
                   cq_ctx->avg_con, cq_ctx->cint_idx);
        seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
                   cq_ctx->cq_err, cq_ctx->qint_idx);
        seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
                   cq_ctx->bpid, cq_ctx->bp_ena);

        seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
                   cq_ctx->update_time, cq_ctx->avg_level);
        seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
                   cq_ctx->head, cq_ctx->tail);

        seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
                   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
        seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
                   cq_ctx->qsize, cq_ctx->caching);
        seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
                   cq_ctx->substream, cq_ctx->ena);
        seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
                   cq_ctx->drop_ena, cq_ctx->drop);
        seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}

static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
                                         void *unused, int ctype)
{
        void (*print_nix_ctx)(struct seq_file *filp,
                              struct nix_aq_enq_rsp *rsp) = NULL;
        struct nix_hw *nix_hw = filp->private;
        struct rvu *rvu = nix_hw->rvu;
        struct nix_aq_enq_req aq_req;
        struct nix_aq_enq_rsp rsp;
        char *ctype_string = NULL;
        int qidx, rc, max_id = 0;
        struct rvu_pfvf *pfvf;
        int nixlf, id, all;
        u16 pcifunc;

        switch (ctype) {
        case NIX_AQ_CTYPE_CQ:
                nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
                id = rvu->rvu_dbg.nix_cq_ctx.id;
                all = rvu->rvu_dbg.nix_cq_ctx.all;
                break;

        case NIX_AQ_CTYPE_SQ:
                nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
                id = rvu->rvu_dbg.nix_sq_ctx.id;
                all = rvu->rvu_dbg.nix_sq_ctx.all;
                break;

        case NIX_AQ_CTYPE_RQ:
                nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
                id = rvu->rvu_dbg.nix_rq_ctx.id;
                all = rvu->rvu_dbg.nix_rq_ctx.all;
                break;

        default:
                return -EINVAL;
        }

        if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
                return -EINVAL;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
                seq_puts(filp, "SQ context is not initialized\n");
                return -EINVAL;
        } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
                seq_puts(filp, "RQ context is not initialized\n");
                return -EINVAL;
        } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
                seq_puts(filp, "CQ context is not initialized\n");
                return -EINVAL;
        }

        if (ctype == NIX_AQ_CTYPE_SQ) {
                max_id = pfvf->sq_ctx->qsize;
                ctype_string = "sq";
                print_nix_ctx = print_nix_sq_ctx;
        } else if (ctype == NIX_AQ_CTYPE_RQ) {
                max_id = pfvf->rq_ctx->qsize;
                ctype_string = "rq";
                print_nix_ctx = print_nix_rq_ctx;
        } else if (ctype == NIX_AQ_CTYPE_CQ) {
                max_id = pfvf->cq_ctx->qsize;
                ctype_string = "cq";
                print_nix_ctx = print_nix_cq_ctx;
        }

        memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
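        /* From here the handler issues one admin-queue READ per queue index
         * and pretty-prints each returned context. A minimal usage sketch
         * for the debugfs files created further below -- the paths are
         * illustrative only and assume debugfs is mounted at
         * /sys/kernel/debug with the cn9xx "octeontx2" root directory:
         *
         *   echo <nixlf> [cq number/all] > /sys/kernel/debug/octeontx2/nix/cq_ctx
         *   cat /sys/kernel/debug/octeontx2/nix/cq_ctx
         */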
        aq_req.hdr.pcifunc = pcifunc;
        aq_req.ctype = ctype;
        aq_req.op = NIX_AQ_INSTOP_READ;
        if (all)
                id = 0;
        else
                max_id = id + 1;
        for (qidx = id; qidx < max_id; qidx++) {
                aq_req.qidx = qidx;
                seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
                           ctype_string, nixlf, aq_req.qidx);
                rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
                if (rc) {
                        seq_puts(filp, "Failed to read the context\n");
                        return -EINVAL;
                }
                print_nix_ctx(filp, &rsp);
        }
        return 0;
}

static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
                               int id, int ctype, char *ctype_string,
                               struct seq_file *m)
{
        struct nix_hw *nix_hw = m->private;
        struct rvu_pfvf *pfvf;
        int max_id = 0;
        u16 pcifunc;

        if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
                return -EINVAL;

        pfvf = rvu_get_pfvf(rvu, pcifunc);

        if (ctype == NIX_AQ_CTYPE_SQ) {
                if (!pfvf->sq_ctx) {
                        dev_warn(rvu->dev, "SQ context is not initialized\n");
                        return -EINVAL;
                }
                max_id = pfvf->sq_ctx->qsize;
        } else if (ctype == NIX_AQ_CTYPE_RQ) {
                if (!pfvf->rq_ctx) {
                        dev_warn(rvu->dev, "RQ context is not initialized\n");
                        return -EINVAL;
                }
                max_id = pfvf->rq_ctx->qsize;
        } else if (ctype == NIX_AQ_CTYPE_CQ) {
                if (!pfvf->cq_ctx) {
                        dev_warn(rvu->dev, "CQ context is not initialized\n");
                        return -EINVAL;
                }
                max_id = pfvf->cq_ctx->qsize;
        }

        if (id < 0 || id >= max_id) {
                dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
                         ctype_string, max_id - 1);
                return -EINVAL;
        }
        switch (ctype) {
        case NIX_AQ_CTYPE_CQ:
                rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
                rvu->rvu_dbg.nix_cq_ctx.id = id;
                rvu->rvu_dbg.nix_cq_ctx.all = all;
                break;

        case NIX_AQ_CTYPE_SQ:
                rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
                rvu->rvu_dbg.nix_sq_ctx.id = id;
                rvu->rvu_dbg.nix_sq_ctx.all = all;
                break;

        case NIX_AQ_CTYPE_RQ:
                rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
                rvu->rvu_dbg.nix_rq_ctx.id = id;
                rvu->rvu_dbg.nix_rq_ctx.all = all;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
                                           const char __user *buffer,
                                           size_t count, loff_t *ppos,
                                           int ctype)
{
        struct seq_file *m = filp->private_data;
        struct nix_hw *nix_hw = m->private;
        struct rvu *rvu = nix_hw->rvu;
        char *cmd_buf, *ctype_string;
        int nixlf, id = 0, ret;
        bool all = false;

        if ((*ppos != 0) || !count)
                return -EINVAL;

        switch (ctype) {
        case NIX_AQ_CTYPE_SQ:
                ctype_string = "sq";
                break;
        case NIX_AQ_CTYPE_RQ:
                ctype_string = "rq";
                break;
        case NIX_AQ_CTYPE_CQ:
                ctype_string = "cq";
                break;
        default:
                return -EINVAL;
        }

        cmd_buf = kzalloc(count + 1, GFP_KERNEL);
        if (!cmd_buf)
                return -ENOMEM;

        ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
                                   &nixlf, &id, &all);
        if (ret < 0) {
                dev_info(rvu->dev,
                         "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
                         ctype_string, ctype_string);
                goto done;
        } else {
                ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
                                          ctype_string, m);
        }
done:
        kfree(cmd_buf);
        return ret ?
ret : count; 2050 } 2051 2052 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp, 2053 const char __user *buffer, 2054 size_t count, loff_t *ppos) 2055 { 2056 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, 2057 NIX_AQ_CTYPE_SQ); 2058 } 2059 2060 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused) 2061 { 2062 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ); 2063 } 2064 2065 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write); 2066 2067 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp, 2068 const char __user *buffer, 2069 size_t count, loff_t *ppos) 2070 { 2071 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, 2072 NIX_AQ_CTYPE_RQ); 2073 } 2074 2075 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused) 2076 { 2077 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ); 2078 } 2079 2080 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write); 2081 2082 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp, 2083 const char __user *buffer, 2084 size_t count, loff_t *ppos) 2085 { 2086 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, 2087 NIX_AQ_CTYPE_CQ); 2088 } 2089 2090 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused) 2091 { 2092 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ); 2093 } 2094 2095 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write); 2096 2097 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize, 2098 unsigned long *bmap, char *qtype) 2099 { 2100 char *buf; 2101 2102 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 2103 if (!buf) 2104 return; 2105 2106 bitmap_print_to_pagebuf(false, buf, bmap, qsize); 2107 seq_printf(filp, "%s context count : %d\n", qtype, qsize); 2108 seq_printf(filp, "%s context ena/dis bitmap : %s\n", 2109 qtype, buf); 2110 kfree(buf); 2111 } 2112 2113 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf) 2114 { 2115 if (!pfvf->cq_ctx) 2116 seq_puts(filp, "cq context is not initialized\n"); 2117 else 2118 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap, 2119 "cq"); 2120 2121 if (!pfvf->rq_ctx) 2122 seq_puts(filp, "rq context is not initialized\n"); 2123 else 2124 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap, 2125 "rq"); 2126 2127 if (!pfvf->sq_ctx) 2128 seq_puts(filp, "sq context is not initialized\n"); 2129 else 2130 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap, 2131 "sq"); 2132 } 2133 2134 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp, 2135 const char __user *buffer, 2136 size_t count, loff_t *ppos) 2137 { 2138 return rvu_dbg_qsize_write(filp, buffer, count, ppos, 2139 BLKTYPE_NIX); 2140 } 2141 2142 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused) 2143 { 2144 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX); 2145 } 2146 2147 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write); 2148 2149 static void print_band_prof_ctx(struct seq_file *m, 2150 struct nix_bandprof_s *prof) 2151 { 2152 char *str; 2153 2154 switch (prof->pc_mode) { 2155 case NIX_RX_PC_MODE_VLAN: 2156 str = "VLAN"; 2157 break; 2158 case NIX_RX_PC_MODE_DSCP: 2159 str = "DSCP"; 2160 break; 2161 case NIX_RX_PC_MODE_GEN: 2162 str = "Generic"; 2163 break; 2164 case NIX_RX_PC_MODE_RSVD: 2165 str = "Reserved"; 2166 break; 2167 } 2168 seq_printf(m, "W0: pc_mode\t\t%s\n", str); 2169 str = (prof->icolor == 3) ? "Color blind" : 2170 (prof->icolor == 0) ? 
"Green" : 2171 (prof->icolor == 1) ? "Yellow" : "Red"; 2172 seq_printf(m, "W0: icolor\t\t%s\n", str); 2173 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena); 2174 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent); 2175 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent); 2176 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent); 2177 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent); 2178 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa); 2179 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa); 2180 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa); 2181 2182 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa); 2183 str = (prof->lmode == 0) ? "byte" : "packet"; 2184 seq_printf(m, "W1: lmode\t\t%s\n", str); 2185 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect); 2186 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv); 2187 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent); 2188 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa); 2189 str = (prof->gc_action == 0) ? "PASS" : 2190 (prof->gc_action == 1) ? "DROP" : "RED"; 2191 seq_printf(m, "W1: gc_action\t\t%s\n", str); 2192 str = (prof->yc_action == 0) ? "PASS" : 2193 (prof->yc_action == 1) ? "DROP" : "RED"; 2194 seq_printf(m, "W1: yc_action\t\t%s\n", str); 2195 str = (prof->rc_action == 0) ? "PASS" : 2196 (prof->rc_action == 1) ? "DROP" : "RED"; 2197 seq_printf(m, "W1: rc_action\t\t%s\n", str); 2198 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); 2199 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); 2200 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); 2201 2202 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); 2203 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum); 2204 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum); 2205 seq_printf(m, "W4: green_pkt_pass\t%lld\n", 2206 (u64)prof->green_pkt_pass); 2207 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n", 2208 (u64)prof->yellow_pkt_pass); 2209 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass); 2210 seq_printf(m, "W7: green_octs_pass\t%lld\n", 2211 (u64)prof->green_octs_pass); 2212 seq_printf(m, "W8: yellow_octs_pass\t%lld\n", 2213 (u64)prof->yellow_octs_pass); 2214 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass); 2215 seq_printf(m, "W10: green_pkt_drop\t%lld\n", 2216 (u64)prof->green_pkt_drop); 2217 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n", 2218 (u64)prof->yellow_pkt_drop); 2219 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop); 2220 seq_printf(m, "W13: green_octs_drop\t%lld\n", 2221 (u64)prof->green_octs_drop); 2222 seq_printf(m, "W14: yellow_octs_drop\t%lld\n", 2223 (u64)prof->yellow_octs_drop); 2224 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop); 2225 seq_puts(m, "==============================\n"); 2226 } 2227 2228 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused) 2229 { 2230 struct nix_hw *nix_hw = m->private; 2231 struct nix_cn10k_aq_enq_req aq_req; 2232 struct nix_cn10k_aq_enq_rsp aq_rsp; 2233 struct rvu *rvu = nix_hw->rvu; 2234 struct nix_ipolicer *ipolicer; 2235 int layer, prof_idx, idx, rc; 2236 u16 pcifunc; 2237 char *str; 2238 2239 /* Ingress policers do not exist on all platforms */ 2240 if (!nix_hw->ipolicer) 2241 return 0; 2242 2243 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 2244 if (layer == BAND_PROF_INVAL_LAYER) 2245 continue; 2246 str = (layer == BAND_PROF_LEAF_LAYER) ? 
"Leaf" : 2247 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top"; 2248 2249 seq_printf(m, "\n%s bandwidth profiles\n", str); 2250 seq_puts(m, "=======================\n"); 2251 2252 ipolicer = &nix_hw->ipolicer[layer]; 2253 2254 for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 2255 if (is_rsrc_free(&ipolicer->band_prof, idx)) 2256 continue; 2257 2258 prof_idx = (idx & 0x3FFF) | (layer << 14); 2259 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 2260 0x00, NIX_AQ_CTYPE_BANDPROF, 2261 prof_idx); 2262 if (rc) { 2263 dev_err(rvu->dev, 2264 "%s: Failed to fetch context of %s profile %d, err %d\n", 2265 __func__, str, idx, rc); 2266 return 0; 2267 } 2268 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx); 2269 pcifunc = ipolicer->pfvf_map[idx]; 2270 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 2271 seq_printf(m, "Allocated to :: PF %d\n", 2272 rvu_get_pf(pcifunc)); 2273 else 2274 seq_printf(m, "Allocated to :: PF %d VF %d\n", 2275 rvu_get_pf(pcifunc), 2276 (pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2277 print_band_prof_ctx(m, &aq_rsp.prof); 2278 } 2279 } 2280 return 0; 2281 } 2282 2283 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL); 2284 2285 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused) 2286 { 2287 struct nix_hw *nix_hw = m->private; 2288 struct nix_ipolicer *ipolicer; 2289 int layer; 2290 char *str; 2291 2292 /* Ingress policers do not exist on all platforms */ 2293 if (!nix_hw->ipolicer) 2294 return 0; 2295 2296 seq_puts(m, "\nBandwidth profile resource free count\n"); 2297 seq_puts(m, "=====================================\n"); 2298 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 2299 if (layer == BAND_PROF_INVAL_LAYER) 2300 continue; 2301 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" : 2302 (layer == BAND_PROF_MID_LAYER) ? 
"Mid " : "Top "; 2303 2304 ipolicer = &nix_hw->ipolicer[layer]; 2305 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str, 2306 ipolicer->band_prof.max, 2307 rvu_rsrc_free_count(&ipolicer->band_prof)); 2308 } 2309 seq_puts(m, "=====================================\n"); 2310 2311 return 0; 2312 } 2313 2314 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL); 2315 2316 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) 2317 { 2318 struct nix_hw *nix_hw; 2319 2320 if (!is_block_implemented(rvu->hw, blkaddr)) 2321 return; 2322 2323 if (blkaddr == BLKADDR_NIX0) { 2324 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root); 2325 nix_hw = &rvu->hw->nix[0]; 2326 } else { 2327 rvu->rvu_dbg.nix = debugfs_create_dir("nix1", 2328 rvu->rvu_dbg.root); 2329 nix_hw = &rvu->hw->nix[1]; 2330 } 2331 2332 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2333 &rvu_dbg_nix_sq_ctx_fops); 2334 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2335 &rvu_dbg_nix_rq_ctx_fops); 2336 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2337 &rvu_dbg_nix_cq_ctx_fops); 2338 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, 2339 &rvu_dbg_nix_ndc_tx_cache_fops); 2340 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, 2341 &rvu_dbg_nix_ndc_rx_cache_fops); 2342 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, 2343 &rvu_dbg_nix_ndc_tx_hits_miss_fops); 2344 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, 2345 &rvu_dbg_nix_ndc_rx_hits_miss_fops); 2346 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, 2347 &rvu_dbg_nix_qsize_fops); 2348 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2349 &rvu_dbg_nix_band_prof_ctx_fops); 2350 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, 2351 &rvu_dbg_nix_band_prof_rsrc_fops); 2352 } 2353 2354 static void rvu_dbg_npa_init(struct rvu *rvu) 2355 { 2356 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root); 2357 2358 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu, 2359 &rvu_dbg_npa_qsize_fops); 2360 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu, 2361 &rvu_dbg_npa_aura_ctx_fops); 2362 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, 2363 &rvu_dbg_npa_pool_ctx_fops); 2364 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, 2365 &rvu_dbg_npa_ndc_cache_fops); 2366 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, 2367 &rvu_dbg_npa_ndc_hits_miss_fops); 2368 } 2369 2370 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \ 2371 ({ \ 2372 u64 cnt; \ 2373 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ 2374 NIX_STATS_RX, &(cnt)); \ 2375 if (!err) \ 2376 seq_printf(s, "%s: %llu\n", name, cnt); \ 2377 cnt; \ 2378 }) 2379 2380 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \ 2381 ({ \ 2382 u64 cnt; \ 2383 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ 2384 NIX_STATS_TX, &(cnt)); \ 2385 if (!err) \ 2386 seq_printf(s, "%s: %llu\n", name, cnt); \ 2387 cnt; \ 2388 }) 2389 2390 static int cgx_print_stats(struct seq_file *s, int lmac_id) 2391 { 2392 struct cgx_link_user_info linfo; 2393 struct mac_ops *mac_ops; 2394 void *cgxd = s->private; 2395 u64 ucast, mcast, bcast; 2396 int stat = 0, err = 0; 2397 u64 tx_stat, rx_stat; 2398 struct rvu *rvu; 2399 2400 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, 2401 PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); 2402 if (!rvu) 2403 return -ENODEV; 2404 2405 mac_ops = 
get_mac_ops(cgxd);
        /* There can be no CGX devices at all */
        if (!mac_ops)
                return 0;

        /* Link status; print it only if linfo was actually filled in */
        seq_puts(s, "\n=======Link Status======\n\n");
        err = cgx_get_link_info(cgxd, lmac_id, &linfo);
        if (err)
                seq_puts(s, "Failed to read link status\n");
        else
                seq_printf(s, "\nLink is %s %d Mbps\n\n",
                           linfo.link_up ? "UP" : "DOWN", linfo.speed);

        /* Rx stats */
        seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
                   mac_ops->name);
        ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
        if (err)
                return err;
        mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
        if (err)
                return err;
        bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
        if (err)
                return err;
        seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
        PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
        if (err)
                return err;
        PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
        if (err)
                return err;
        PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
        if (err)
                return err;

        /* Tx stats */
        seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
                   mac_ops->name);
        ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
        if (err)
                return err;
        mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
        if (err)
                return err;
        bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
        if (err)
                return err;
        seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
        PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
        if (err)
                return err;
        PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
        if (err)
                return err;

        /* Rx stats */
        seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
        while (stat < mac_ops->rx_stats_cnt) {
                err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
                if (err)
                        return err;
                if (is_rvu_otx2(rvu))
                        seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
                                   rx_stat);
                else
                        seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
                                   rx_stat);
                stat++;
        }

        /* Tx stats */
        stat = 0;
        seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
        while (stat < mac_ops->tx_stats_cnt) {
                err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
                if (err)
                        return err;

                if (is_rvu_otx2(rvu))
                        seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
                                   tx_stat);
                else
                        seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
                                   tx_stat);
                stat++;
        }

        return err;
}

static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
        struct dentry *current_dir;
        char *buf;

        /* The lmac id is encoded in the parent debugfs dir name ("lmacN") */
        current_dir = filp->file->f_path.dentry->d_parent;
        buf = strrchr(current_dir->d_name.name, 'c');
        if (!buf)
                return -EINVAL;

        return kstrtoint(buf + 1, 10, lmac_id);
}

static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
        int lmac_id, err;

        err = rvu_dbg_derive_lmacid(filp, &lmac_id);
        if (!err)
                return cgx_print_stats(filp, lmac_id);

        return err;
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);

static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
        struct pci_dev *pdev = NULL;
        void *cgxd = s->private;
        char *bcast,
*mcast; 2527 u16 index, domain; 2528 u8 dmac[ETH_ALEN]; 2529 struct rvu *rvu; 2530 u64 cfg, mac; 2531 int pf; 2532 2533 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, 2534 PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); 2535 if (!rvu) 2536 return -ENODEV; 2537 2538 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); 2539 domain = 2; 2540 2541 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); 2542 if (!pdev) 2543 return 0; 2544 2545 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id); 2546 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT"; 2547 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT"; 2548 2549 seq_puts(s, 2550 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n"); 2551 seq_printf(s, "%s PF%d %9s %9s", 2552 dev_name(&pdev->dev), pf, bcast, mcast); 2553 if (cfg & CGX_DMAC_CAM_ACCEPT) 2554 seq_printf(s, "%12s\n\n", "UNICAST"); 2555 else 2556 seq_printf(s, "%16s\n\n", "PROMISCUOUS"); 2557 2558 seq_puts(s, "\nDMAC-INDEX ADDRESS\n"); 2559 2560 for (index = 0 ; index < 32 ; index++) { 2561 cfg = cgx_read_dmac_entry(cgxd, index); 2562 /* Display enabled dmac entries associated with current lmac */ 2563 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) && 2564 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) { 2565 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg); 2566 u64_to_ether_addr(mac, dmac); 2567 seq_printf(s, "%7d %pM\n", index, dmac); 2568 } 2569 } 2570 2571 pci_dev_put(pdev); 2572 return 0; 2573 } 2574 2575 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) 2576 { 2577 int err, lmac_id; 2578 2579 err = rvu_dbg_derive_lmacid(filp, &lmac_id); 2580 if (!err) 2581 return cgx_print_dmac_flt(filp, lmac_id); 2582 2583 return err; 2584 } 2585 2586 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); 2587 2588 static void rvu_dbg_cgx_init(struct rvu *rvu) 2589 { 2590 struct mac_ops *mac_ops; 2591 unsigned long lmac_bmap; 2592 int i, lmac_id; 2593 char dname[20]; 2594 void *cgx; 2595 2596 if (!cgx_get_cgxcnt_max()) 2597 return; 2598 2599 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); 2600 if (!mac_ops) 2601 return; 2602 2603 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name, 2604 rvu->rvu_dbg.root); 2605 2606 for (i = 0; i < cgx_get_cgxcnt_max(); i++) { 2607 cgx = rvu_cgx_pdata(i, rvu); 2608 if (!cgx) 2609 continue; 2610 lmac_bmap = cgx_get_lmac_bmap(cgx); 2611 /* cgx debugfs dir */ 2612 sprintf(dname, "%s%d", mac_ops->name, i); 2613 rvu->rvu_dbg.cgx = debugfs_create_dir(dname, 2614 rvu->rvu_dbg.cgx_root); 2615 2616 for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) { 2617 /* lmac debugfs dir */ 2618 sprintf(dname, "lmac%d", lmac_id); 2619 rvu->rvu_dbg.lmac = 2620 debugfs_create_dir(dname, rvu->rvu_dbg.cgx); 2621 2622 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, 2623 cgx, &rvu_dbg_cgx_stat_fops); 2624 debugfs_create_file("mac_filter", 0600, 2625 rvu->rvu_dbg.lmac, cgx, 2626 &rvu_dbg_cgx_dmac_flt_fops); 2627 } 2628 } 2629 } 2630 2631 /* NPC debugfs APIs */ 2632 static void rvu_print_npc_mcam_info(struct seq_file *s, 2633 u16 pcifunc, int blkaddr) 2634 { 2635 struct rvu *rvu = s->private; 2636 int entry_acnt, entry_ecnt; 2637 int cntr_acnt, cntr_ecnt; 2638 2639 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, 2640 &entry_acnt, &entry_ecnt); 2641 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, 2642 &cntr_acnt, &cntr_ecnt); 2643 if (!entry_acnt && !cntr_acnt) 2644 return; 2645 2646 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 2647 seq_printf(s, "\n\t\t Device \t\t: PF%d\n", 2648 rvu_get_pf(pcifunc)); 2649 else 2650 
seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", 2651 rvu_get_pf(pcifunc), 2652 (pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2653 2654 if (entry_acnt) { 2655 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt); 2656 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt); 2657 } 2658 if (cntr_acnt) { 2659 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt); 2660 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt); 2661 } 2662 } 2663 2664 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) 2665 { 2666 struct rvu *rvu = filp->private; 2667 int pf, vf, numvfs, blkaddr; 2668 struct npc_mcam *mcam; 2669 u16 pcifunc, counters; 2670 u64 cfg; 2671 2672 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2673 if (blkaddr < 0) 2674 return -ENODEV; 2675 2676 mcam = &rvu->hw->mcam; 2677 counters = rvu->hw->npc_counters; 2678 2679 seq_puts(filp, "\nNPC MCAM info:\n"); 2680 /* MCAM keywidth on receive and transmit sides */ 2681 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); 2682 cfg = (cfg >> 32) & 0x07; 2683 seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? 2684 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? 2685 "224bits" : "448bits")); 2686 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX)); 2687 cfg = (cfg >> 32) & 0x07; 2688 seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? 2689 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? 2690 "224bits" : "448bits")); 2691 2692 mutex_lock(&mcam->lock); 2693 /* MCAM entries */ 2694 seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries); 2695 seq_printf(filp, "\t\t Reserved \t: %d\n", 2696 mcam->total_entries - mcam->bmap_entries); 2697 seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt); 2698 2699 /* MCAM counters */ 2700 seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters); 2701 seq_printf(filp, "\t\t Reserved \t: %d\n", 2702 counters - mcam->counters.max); 2703 seq_printf(filp, "\t\t Available \t: %d\n", 2704 rvu_rsrc_free_count(&mcam->counters)); 2705 2706 if (mcam->bmap_entries == mcam->bmap_fcnt) { 2707 mutex_unlock(&mcam->lock); 2708 return 0; 2709 } 2710 2711 seq_puts(filp, "\n\t\t Current allocation\n"); 2712 seq_puts(filp, "\t\t====================\n"); 2713 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2714 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2715 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); 2716 2717 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2718 numvfs = (cfg >> 12) & 0xFF; 2719 for (vf = 0; vf < numvfs; vf++) { 2720 pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); 2721 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); 2722 } 2723 } 2724 2725 mutex_unlock(&mcam->lock); 2726 return 0; 2727 } 2728 2729 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL); 2730 2731 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, 2732 void *unused) 2733 { 2734 struct rvu *rvu = filp->private; 2735 struct npc_mcam *mcam; 2736 int blkaddr; 2737 2738 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2739 if (blkaddr < 0) 2740 return -ENODEV; 2741 2742 mcam = &rvu->hw->mcam; 2743 2744 seq_puts(filp, "\nNPC MCAM RX miss action stats\n"); 2745 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr, 2746 rvu_read64(rvu, blkaddr, 2747 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr))); 2748 2749 return 0; 2750 } 2751 2752 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); 2753 2754 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, 2755 struct rvu_npc_mcam_rule 
*rule) 2756 { 2757 u8 bit; 2758 2759 for_each_set_bit(bit, (unsigned long *)&rule->features, 64) { 2760 seq_printf(s, "\t%s ", npc_get_field_name(bit)); 2761 switch (bit) { 2762 case NPC_LXMB: 2763 if (rule->lxmb == 1) 2764 seq_puts(s, "\tL2M nibble is set\n"); 2765 else 2766 seq_puts(s, "\tL2B nibble is set\n"); 2767 break; 2768 case NPC_DMAC: 2769 seq_printf(s, "%pM ", rule->packet.dmac); 2770 seq_printf(s, "mask %pM\n", rule->mask.dmac); 2771 break; 2772 case NPC_SMAC: 2773 seq_printf(s, "%pM ", rule->packet.smac); 2774 seq_printf(s, "mask %pM\n", rule->mask.smac); 2775 break; 2776 case NPC_ETYPE: 2777 seq_printf(s, "0x%x ", ntohs(rule->packet.etype)); 2778 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype)); 2779 break; 2780 case NPC_OUTER_VID: 2781 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci)); 2782 seq_printf(s, "mask 0x%x\n", 2783 ntohs(rule->mask.vlan_tci)); 2784 break; 2785 case NPC_TOS: 2786 seq_printf(s, "%d ", rule->packet.tos); 2787 seq_printf(s, "mask 0x%x\n", rule->mask.tos); 2788 break; 2789 case NPC_SIP_IPV4: 2790 seq_printf(s, "%pI4 ", &rule->packet.ip4src); 2791 seq_printf(s, "mask %pI4\n", &rule->mask.ip4src); 2792 break; 2793 case NPC_DIP_IPV4: 2794 seq_printf(s, "%pI4 ", &rule->packet.ip4dst); 2795 seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst); 2796 break; 2797 case NPC_SIP_IPV6: 2798 seq_printf(s, "%pI6 ", rule->packet.ip6src); 2799 seq_printf(s, "mask %pI6\n", rule->mask.ip6src); 2800 break; 2801 case NPC_DIP_IPV6: 2802 seq_printf(s, "%pI6 ", rule->packet.ip6dst); 2803 seq_printf(s, "mask %pI6\n", rule->mask.ip6dst); 2804 break; 2805 case NPC_IPFRAG_IPV6: 2806 seq_printf(s, "0x%x ", rule->packet.next_header); 2807 seq_printf(s, "mask 0x%x\n", rule->mask.next_header); 2808 break; 2809 case NPC_IPFRAG_IPV4: 2810 seq_printf(s, "0x%x ", rule->packet.ip_flag); 2811 seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag); 2812 break; 2813 case NPC_SPORT_TCP: 2814 case NPC_SPORT_UDP: 2815 case NPC_SPORT_SCTP: 2816 seq_printf(s, "%d ", ntohs(rule->packet.sport)); 2817 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport)); 2818 break; 2819 case NPC_DPORT_TCP: 2820 case NPC_DPORT_UDP: 2821 case NPC_DPORT_SCTP: 2822 seq_printf(s, "%d ", ntohs(rule->packet.dport)); 2823 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport)); 2824 break; 2825 default: 2826 seq_puts(s, "\n"); 2827 break; 2828 } 2829 } 2830 } 2831 2832 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, 2833 struct rvu_npc_mcam_rule *rule) 2834 { 2835 if (is_npc_intf_tx(rule->intf)) { 2836 switch (rule->tx_action.op) { 2837 case NIX_TX_ACTIONOP_DROP: 2838 seq_puts(s, "\taction: Drop\n"); 2839 break; 2840 case NIX_TX_ACTIONOP_UCAST_DEFAULT: 2841 seq_puts(s, "\taction: Unicast to default channel\n"); 2842 break; 2843 case NIX_TX_ACTIONOP_UCAST_CHAN: 2844 seq_printf(s, "\taction: Unicast to channel %d\n", 2845 rule->tx_action.index); 2846 break; 2847 case NIX_TX_ACTIONOP_MCAST: 2848 seq_puts(s, "\taction: Multicast\n"); 2849 break; 2850 case NIX_TX_ACTIONOP_DROP_VIOL: 2851 seq_puts(s, "\taction: Lockdown Violation Drop\n"); 2852 break; 2853 default: 2854 break; 2855 } 2856 } else { 2857 switch (rule->rx_action.op) { 2858 case NIX_RX_ACTIONOP_DROP: 2859 seq_puts(s, "\taction: Drop\n"); 2860 break; 2861 case NIX_RX_ACTIONOP_UCAST: 2862 seq_printf(s, "\taction: Direct to queue %d\n", 2863 rule->rx_action.index); 2864 break; 2865 case NIX_RX_ACTIONOP_RSS: 2866 seq_puts(s, "\taction: RSS\n"); 2867 break; 2868 case NIX_RX_ACTIONOP_UCAST_IPSEC: 2869 seq_puts(s, "\taction: Unicast ipsec\n"); 2870 break; 2871 
case NIX_RX_ACTIONOP_MCAST: 2872 seq_puts(s, "\taction: Multicast\n"); 2873 break; 2874 default: 2875 break; 2876 } 2877 } 2878 } 2879 2880 static const char *rvu_dbg_get_intf_name(int intf) 2881 { 2882 switch (intf) { 2883 case NIX_INTFX_RX(0): 2884 return "NIX0_RX"; 2885 case NIX_INTFX_RX(1): 2886 return "NIX1_RX"; 2887 case NIX_INTFX_TX(0): 2888 return "NIX0_TX"; 2889 case NIX_INTFX_TX(1): 2890 return "NIX1_TX"; 2891 default: 2892 break; 2893 } 2894 2895 return "unknown"; 2896 } 2897 2898 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) 2899 { 2900 struct rvu_npc_mcam_rule *iter; 2901 struct rvu *rvu = s->private; 2902 struct npc_mcam *mcam; 2903 int pf, vf = -1; 2904 bool enabled; 2905 int blkaddr; 2906 u16 target; 2907 u64 hits; 2908 2909 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2910 if (blkaddr < 0) 2911 return 0; 2912 2913 mcam = &rvu->hw->mcam; 2914 2915 mutex_lock(&mcam->lock); 2916 list_for_each_entry(iter, &mcam->mcam_rules, list) { 2917 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 2918 seq_printf(s, "\n\tInstalled by: PF%d ", pf); 2919 2920 if (iter->owner & RVU_PFVF_FUNC_MASK) { 2921 vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1; 2922 seq_printf(s, "VF%d", vf); 2923 } 2924 seq_puts(s, "\n"); 2925 2926 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ? 2927 "RX" : "TX"); 2928 seq_printf(s, "\tinterface: %s\n", 2929 rvu_dbg_get_intf_name(iter->intf)); 2930 seq_printf(s, "\tmcam entry: %d\n", iter->entry); 2931 2932 rvu_dbg_npc_mcam_show_flows(s, iter); 2933 if (is_npc_intf_rx(iter->intf)) { 2934 target = iter->rx_action.pf_func; 2935 pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 2936 seq_printf(s, "\tForward to: PF%d ", pf); 2937 2938 if (target & RVU_PFVF_FUNC_MASK) { 2939 vf = (target & RVU_PFVF_FUNC_MASK) - 1; 2940 seq_printf(s, "VF%d", vf); 2941 } 2942 seq_puts(s, "\n"); 2943 seq_printf(s, "\tchannel: 0x%x\n", iter->chan); 2944 seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask); 2945 } 2946 2947 rvu_dbg_npc_mcam_show_action(s, iter); 2948 2949 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry); 2950 seq_printf(s, "\tenabled: %s\n", enabled ? 
"yes" : "no"); 2951 2952 if (!iter->has_cntr) 2953 continue; 2954 seq_printf(s, "\tcounter: %d\n", iter->cntr); 2955 2956 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr)); 2957 seq_printf(s, "\thits: %lld\n", hits); 2958 } 2959 mutex_unlock(&mcam->lock); 2960 2961 return 0; 2962 } 2963 2964 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL); 2965 2966 static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused) 2967 { 2968 struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 }; 2969 struct npc_exact_table_entry *cam_entry; 2970 struct npc_exact_table *table; 2971 struct rvu *rvu = s->private; 2972 int i, j; 2973 2974 u8 bitmap = 0; 2975 2976 table = rvu->hw->table; 2977 2978 mutex_lock(&table->lock); 2979 2980 /* Check if there is at least one entry in mem table */ 2981 if (!table->mem_tbl_entry_cnt) 2982 goto dump_cam_table; 2983 2984 /* Print table headers */ 2985 seq_puts(s, "\n\tExact Match MEM Table\n"); 2986 seq_puts(s, "Index\t"); 2987 2988 for (i = 0; i < table->mem_table.ways; i++) { 2989 mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i], 2990 struct npc_exact_table_entry, list); 2991 2992 seq_printf(s, "Way-%d\t\t\t\t\t", i); 2993 } 2994 2995 seq_puts(s, "\n"); 2996 for (i = 0; i < table->mem_table.ways; i++) 2997 seq_puts(s, "\tChan MAC \t"); 2998 2999 seq_puts(s, "\n\n"); 3000 3001 /* Print mem table entries */ 3002 for (i = 0; i < table->mem_table.depth; i++) { 3003 bitmap = 0; 3004 for (j = 0; j < table->mem_table.ways; j++) { 3005 if (!mem_entry[j]) 3006 continue; 3007 3008 if (mem_entry[j]->index != i) 3009 continue; 3010 3011 bitmap |= BIT(j); 3012 } 3013 3014 /* No valid entries */ 3015 if (!bitmap) 3016 continue; 3017 3018 seq_printf(s, "%d\t", i); 3019 for (j = 0; j < table->mem_table.ways; j++) { 3020 if (!(bitmap & BIT(j))) { 3021 seq_puts(s, "nil\t\t\t\t\t"); 3022 continue; 3023 } 3024 3025 seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan, 3026 mem_entry[j]->mac); 3027 mem_entry[j] = list_next_entry(mem_entry[j], list); 3028 } 3029 seq_puts(s, "\n"); 3030 } 3031 3032 dump_cam_table: 3033 3034 if (!table->cam_tbl_entry_cnt) 3035 goto done; 3036 3037 seq_puts(s, "\n\tExact Match CAM Table\n"); 3038 seq_puts(s, "index\tchan\tMAC\n"); 3039 3040 /* Traverse cam table entries */ 3041 list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) { 3042 seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan, 3043 cam_entry->mac); 3044 } 3045 3046 done: 3047 mutex_unlock(&table->lock); 3048 return 0; 3049 } 3050 3051 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL); 3052 3053 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused) 3054 { 3055 struct npc_exact_table *table; 3056 struct rvu *rvu = s->private; 3057 int i; 3058 3059 table = rvu->hw->table; 3060 3061 seq_puts(s, "\n\tExact Table Info\n"); 3062 seq_printf(s, "Exact Match Feature : %s\n", 3063 rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable"); 3064 if (!rvu->hw->cap.npc_exact_match_enabled) 3065 return 0; 3066 3067 seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n"); 3068 for (i = 0; i < table->num_drop_rules; i++) 3069 seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]); 3070 3071 seq_puts(s, "\nMcam Index\tPromisc Mode Status\n"); 3072 for (i = 0; i < table->num_drop_rules; i++) 3073 seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? 
"on" : "off"); 3074 3075 seq_puts(s, "\n\tMEM Table Info\n"); 3076 seq_printf(s, "Ways : %d\n", table->mem_table.ways); 3077 seq_printf(s, "Depth : %d\n", table->mem_table.depth); 3078 seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask); 3079 seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask); 3080 seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset); 3081 3082 seq_puts(s, "\n\tCAM Table Info\n"); 3083 seq_printf(s, "Depth : %d\n", table->cam_table.depth); 3084 3085 return 0; 3086 } 3087 3088 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL); 3089 3090 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused) 3091 { 3092 struct npc_exact_table *table; 3093 struct rvu *rvu = s->private; 3094 struct npc_key_field *field; 3095 u16 chan, pcifunc; 3096 int blkaddr, i; 3097 u64 cfg, cam1; 3098 char *str; 3099 3100 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3101 table = rvu->hw->table; 3102 3103 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN]; 3104 3105 seq_puts(s, "\n\t Exact Hit on drop status\n"); 3106 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n"); 3107 3108 for (i = 0; i < table->num_drop_rules; i++) { 3109 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i); 3110 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0)); 3111 3112 /* channel will be always in keyword 0 */ 3113 cam1 = rvu_read64(rvu, blkaddr, 3114 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1)); 3115 chan = field->kw_mask[0] & cam1; 3116 3117 str = (cfg & 1) ? "enabled" : "disabled"; 3118 3119 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i, 3120 rvu_read64(rvu, blkaddr, 3121 NPC_AF_MATCH_STATX(table->counter_idx[i])), 3122 chan, str); 3123 } 3124 3125 return 0; 3126 } 3127 3128 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL); 3129 3130 static void rvu_dbg_npc_init(struct rvu *rvu) 3131 { 3132 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); 3133 3134 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu, 3135 &rvu_dbg_npc_mcam_info_fops); 3136 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu, 3137 &rvu_dbg_npc_mcam_rules_fops); 3138 3139 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu, 3140 &rvu_dbg_npc_rx_miss_act_fops); 3141 3142 if (!rvu->hw->cap.npc_exact_match_enabled) 3143 return; 3144 3145 debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu, 3146 &rvu_dbg_npc_exact_entries_fops); 3147 3148 debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu, 3149 &rvu_dbg_npc_exact_info_fops); 3150 3151 debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu, 3152 &rvu_dbg_npc_exact_drop_cnt_fops); 3153 3154 } 3155 3156 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type) 3157 { 3158 struct cpt_ctx *ctx = filp->private; 3159 u64 busy_sts = 0, free_sts = 0; 3160 u32 e_min = 0, e_max = 0, e, i; 3161 u16 max_ses, max_ies, max_aes; 3162 struct rvu *rvu = ctx->rvu; 3163 int blkaddr = ctx->blkaddr; 3164 u64 reg; 3165 3166 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); 3167 max_ses = reg & 0xffff; 3168 max_ies = (reg >> 16) & 0xffff; 3169 max_aes = (reg >> 32) & 0xffff; 3170 3171 switch (eng_type) { 3172 case CPT_AE_TYPE: 3173 e_min = max_ses + max_ies; 3174 e_max = max_ses + max_ies + max_aes; 3175 break; 3176 case CPT_SE_TYPE: 3177 e_min = 0; 3178 e_max = max_ses; 3179 break; 3180 case CPT_IE_TYPE: 3181 e_min = max_ses; 3182 e_max = max_ses + max_ies; 3183 break; 3184 default: 3185 return -EINVAL; 3186 } 3187 3188 for (e = e_min, i = 0; e < 
e_max; e++, i++) { 3189 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); 3190 if (reg & 0x1) 3191 busy_sts |= 1ULL << i; 3192 3193 if (reg & 0x2) 3194 free_sts |= 1ULL << i; 3195 } 3196 seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts); 3197 seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts); 3198 3199 return 0; 3200 } 3201 3202 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused) 3203 { 3204 return cpt_eng_sts_display(filp, CPT_AE_TYPE); 3205 } 3206 3207 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL); 3208 3209 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused) 3210 { 3211 return cpt_eng_sts_display(filp, CPT_SE_TYPE); 3212 } 3213 3214 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL); 3215 3216 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused) 3217 { 3218 return cpt_eng_sts_display(filp, CPT_IE_TYPE); 3219 } 3220 3221 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL); 3222 3223 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused) 3224 { 3225 struct cpt_ctx *ctx = filp->private; 3226 u16 max_ses, max_ies, max_aes; 3227 struct rvu *rvu = ctx->rvu; 3228 int blkaddr = ctx->blkaddr; 3229 u32 e_max, e; 3230 u64 reg; 3231 3232 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); 3233 max_ses = reg & 0xffff; 3234 max_ies = (reg >> 16) & 0xffff; 3235 max_aes = (reg >> 32) & 0xffff; 3236 3237 e_max = max_ses + max_ies + max_aes; 3238 3239 seq_puts(filp, "===========================================\n"); 3240 for (e = 0; e < e_max; e++) { 3241 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e)); 3242 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e, 3243 reg & 0xff); 3244 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e)); 3245 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e, 3246 reg); 3247 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e)); 3248 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e, 3249 reg); 3250 seq_puts(filp, "===========================================\n"); 3251 } 3252 return 0; 3253 } 3254 3255 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL); 3256 3257 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused) 3258 { 3259 struct cpt_ctx *ctx = filp->private; 3260 int blkaddr = ctx->blkaddr; 3261 struct rvu *rvu = ctx->rvu; 3262 struct rvu_block *block; 3263 struct rvu_hwinfo *hw; 3264 u64 reg; 3265 u32 lf; 3266 3267 hw = rvu->hw; 3268 block = &hw->block[blkaddr]; 3269 if (!block->lf.bmap) 3270 return -ENODEV; 3271 3272 seq_puts(filp, "===========================================\n"); 3273 for (lf = 0; lf < block->lf.max; lf++) { 3274 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf)); 3275 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg); 3276 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf)); 3277 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg); 3278 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf)); 3279 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg); 3280 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg | 3281 (lf << block->lfshift)); 3282 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg); 3283 seq_puts(filp, "===========================================\n"); 3284 } 3285 return 0; 3286 } 3287 3288 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL); 3289 3290 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused) 3291 { 3292 struct cpt_ctx *ctx = filp->private; 3293 struct rvu *rvu = ctx->rvu; 3294 int blkaddr = ctx->blkaddr; 
3295 u64 reg0, reg1; 3296 3297 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); 3298 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); 3299 seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1); 3300 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0)); 3301 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1)); 3302 seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1); 3303 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0)); 3304 seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0); 3305 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT); 3306 seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0); 3307 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT); 3308 seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0); 3309 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO); 3310 seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0); 3311 3312 return 0; 3313 } 3314 3315 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL); 3316 3317 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused) 3318 { 3319 struct cpt_ctx *ctx = filp->private; 3320 struct rvu *rvu = ctx->rvu; 3321 int blkaddr = ctx->blkaddr; 3322 u64 reg; 3323 3324 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC); 3325 seq_printf(filp, "CPT instruction requests %llu\n", reg); 3326 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC); 3327 seq_printf(filp, "CPT instruction latency %llu\n", reg); 3328 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC); 3329 seq_printf(filp, "CPT NCB read requests %llu\n", reg); 3330 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC); 3331 seq_printf(filp, "CPT NCB read latency %llu\n", reg); 3332 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC); 3333 seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg); 3334 reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC); 3335 seq_printf(filp, "CPT active cycles pc %llu\n", reg); 3336 reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT); 3337 seq_printf(filp, "CPT clock count pc %llu\n", reg); 3338 3339 return 0; 3340 } 3341 3342 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL); 3343 3344 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr) 3345 { 3346 struct cpt_ctx *ctx; 3347 3348 if (!is_block_implemented(rvu->hw, blkaddr)) 3349 return; 3350 3351 if (blkaddr == BLKADDR_CPT0) { 3352 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root); 3353 ctx = &rvu->rvu_dbg.cpt_ctx[0]; 3354 ctx->blkaddr = BLKADDR_CPT0; 3355 ctx->rvu = rvu; 3356 } else { 3357 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1", 3358 rvu->rvu_dbg.root); 3359 ctx = &rvu->rvu_dbg.cpt_ctx[1]; 3360 ctx->blkaddr = BLKADDR_CPT1; 3361 ctx->rvu = rvu; 3362 } 3363 3364 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx, 3365 &rvu_dbg_cpt_pc_fops); 3366 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx, 3367 &rvu_dbg_cpt_ae_sts_fops); 3368 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx, 3369 &rvu_dbg_cpt_se_sts_fops); 3370 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx, 3371 &rvu_dbg_cpt_ie_sts_fops); 3372 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx, 3373 &rvu_dbg_cpt_engines_info_fops); 3374 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx, 3375 &rvu_dbg_cpt_lfs_info_fops); 3376 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx, 3377 &rvu_dbg_cpt_err_info_fops); 3378 } 3379 3380 static const char *rvu_get_dbg_dir_name(struct rvu *rvu) 3381 { 3382 if (!is_rvu_otx2(rvu)) 3383 return "cn10k"; 3384 else 3385 return "octeontx2"; 3386 } 3387 
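/* Rough sketch of the debugfs tree that rvu_dbg_init() below builds; the
 * root name comes from rvu_get_dbg_dir_name() above and which entries
 * appear depends on the blocks the silicon implements:
 *
 *   <debugfs mount>/octeontx2 (or cn10k)/
 *       rsrc_alloc
 *       lmtst_map_table                    (cn10k only)
 *       rvu_pf_cgx_map | rvu_pf_rpm_map
 *       npa/    qsize, aura_ctx, pool_ctx, ndc_cache, ndc_hits_miss
 *       nix/ (and nix1/)    sq_ctx, rq_ctx, cq_ctx, ndc_*, qsize,
 *                           ingress_policer_ctx, ingress_policer_rsrc
 *       <mac_ops->name><N>/lmac<N>/    stats, mac_filter
 *       npc/    mcam_info, mcam_rules, rx_miss_act_stats, exact_*
 *       cpt/ (and cpt1/)    cpt_pc, cpt_ae_sts, cpt_se_sts, cpt_ie_sts,
 *                           cpt_engines_info, cpt_lfs_info, cpt_err_info
 *       plus the MCS entries added by rvu_dbg_mcs_init()
 */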
3388 void rvu_dbg_init(struct rvu *rvu) 3389 { 3390 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL); 3391 3392 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, 3393 &rvu_dbg_rsrc_status_fops); 3394 3395 if (!is_rvu_otx2(rvu)) 3396 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root, 3397 rvu, &rvu_dbg_lmtst_map_table_fops); 3398 3399 if (!cgx_get_cgxcnt_max()) 3400 goto create; 3401 3402 if (is_rvu_otx2(rvu)) 3403 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, 3404 rvu, &rvu_dbg_rvu_pf_cgx_map_fops); 3405 else 3406 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root, 3407 rvu, &rvu_dbg_rvu_pf_cgx_map_fops); 3408 3409 create: 3410 rvu_dbg_npa_init(rvu); 3411 rvu_dbg_nix_init(rvu, BLKADDR_NIX0); 3412 3413 rvu_dbg_nix_init(rvu, BLKADDR_NIX1); 3414 rvu_dbg_cgx_init(rvu); 3415 rvu_dbg_npc_init(rvu); 3416 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0); 3417 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1); 3418 rvu_dbg_mcs_init(rvu); 3419 } 3420 3421 void rvu_dbg_exit(struct rvu *rvu) 3422 { 3423 debugfs_remove_recursive(rvu->rvu_dbg.root); 3424 } 3425 3426 #endif /* CONFIG_DEBUG_FS */ 3427