1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Admin Function driver 3 * 4 * Copyright (C) 2019 Marvell. 5 * 6 */ 7 8 #ifdef CONFIG_DEBUG_FS 9 10 #include <linux/fs.h> 11 #include <linux/debugfs.h> 12 #include <linux/module.h> 13 #include <linux/pci.h> 14 15 #include "rvu_struct.h" 16 #include "rvu_reg.h" 17 #include "rvu.h" 18 #include "cgx.h" 19 #include "lmac_common.h" 20 #include "npc.h" 21 #include "rvu_npc_hash.h" 22 #include "mcs.h" 23 24 #define DEBUGFS_DIR_NAME "octeontx2" 25 26 enum { 27 CGX_STAT0, 28 CGX_STAT1, 29 CGX_STAT2, 30 CGX_STAT3, 31 CGX_STAT4, 32 CGX_STAT5, 33 CGX_STAT6, 34 CGX_STAT7, 35 CGX_STAT8, 36 CGX_STAT9, 37 CGX_STAT10, 38 CGX_STAT11, 39 CGX_STAT12, 40 CGX_STAT13, 41 CGX_STAT14, 42 CGX_STAT15, 43 CGX_STAT16, 44 CGX_STAT17, 45 CGX_STAT18, 46 }; 47 48 /* NIX TX stats */ 49 enum nix_stat_lf_tx { 50 TX_UCAST = 0x0, 51 TX_BCAST = 0x1, 52 TX_MCAST = 0x2, 53 TX_DROP = 0x3, 54 TX_OCTS = 0x4, 55 TX_STATS_ENUM_LAST, 56 }; 57 58 /* NIX RX stats */ 59 enum nix_stat_lf_rx { 60 RX_OCTS = 0x0, 61 RX_UCAST = 0x1, 62 RX_BCAST = 0x2, 63 RX_MCAST = 0x3, 64 RX_DROP = 0x4, 65 RX_DROP_OCTS = 0x5, 66 RX_FCS = 0x6, 67 RX_ERR = 0x7, 68 RX_DRP_BCAST = 0x8, 69 RX_DRP_MCAST = 0x9, 70 RX_DRP_L3BCAST = 0xa, 71 RX_DRP_L3MCAST = 0xb, 72 RX_STATS_ENUM_LAST, 73 }; 74 75 static char *cgx_rx_stats_fields[] = { 76 [CGX_STAT0] = "Received packets", 77 [CGX_STAT1] = "Octets of received packets", 78 [CGX_STAT2] = "Received PAUSE packets", 79 [CGX_STAT3] = "Received PAUSE and control packets", 80 [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets", 81 [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets", 82 [CGX_STAT6] = "Packets dropped due to RX FIFO full", 83 [CGX_STAT7] = "Octets dropped due to RX FIFO full", 84 [CGX_STAT8] = "Error packets", 85 [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets", 86 [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets", 87 [CGX_STAT11] = "NCSI-bound packets dropped", 88 [CGX_STAT12] = "NCSI-bound octets dropped", 89 }; 90 91 static char *cgx_tx_stats_fields[] = { 92 [CGX_STAT0] = "Packets dropped due to excessive collisions", 93 [CGX_STAT1] = "Packets dropped due to excessive deferral", 94 [CGX_STAT2] = "Multiple collisions before successful transmission", 95 [CGX_STAT3] = "Single collisions before successful transmission", 96 [CGX_STAT4] = "Total octets sent on the interface", 97 [CGX_STAT5] = "Total frames sent on the interface", 98 [CGX_STAT6] = "Packets sent with an octet count < 64", 99 [CGX_STAT7] = "Packets sent with an octet count == 64", 100 [CGX_STAT8] = "Packets sent with an octet count of 65-127", 101 [CGX_STAT9] = "Packets sent with an octet count of 128-255", 102 [CGX_STAT10] = "Packets sent with an octet count of 256-511", 103 [CGX_STAT11] = "Packets sent with an octet count of 512-1023", 104 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518", 105 [CGX_STAT13] = "Packets sent with an octet count of > 1518", 106 [CGX_STAT14] = "Packets sent to a broadcast DMAC", 107 [CGX_STAT15] = "Packets sent to the multicast DMAC", 108 [CGX_STAT16] = "Transmit underflow and were truncated", 109 [CGX_STAT17] = "Control/PAUSE packets sent", 110 }; 111 112 static char *rpm_rx_stats_fields[] = { 113 "Octets of received packets", 114 "Octets of received packets with out error", 115 "Received packets with alignment errors", 116 "Control/PAUSE packets received", 117 "Packets received with Frame too long Errors", 118 "Packets received with a1nrange length Errors", 119 "Received packets", 120 "Packets received with FrameCheckSequenceErrors", 121 "Packets received with VLAN header", 122 "Error packets", 123 "Packets received with unicast DMAC", 124 "Packets received with multicast DMAC", 125 "Packets received with broadcast DMAC", 126 "Dropped packets", 127 "Total frames received on interface", 128 "Packets received with an octet count < 64", 129 "Packets received with an octet count == 64", 130 "Packets received with an octet count of 65-127", 131 "Packets received with an octet count of 128-255", 132 "Packets received with an octet count of 256-511", 133 "Packets received with an octet count of 512-1023", 134 "Packets received with an octet count of 1024-1518", 135 "Packets received with an octet count of > 1518", 136 "Oversized Packets", 137 "Jabber Packets", 138 "Fragmented Packets", 139 "CBFC(class based flow control) pause frames received for class 0", 140 "CBFC pause frames received for class 1", 141 "CBFC pause frames received for class 2", 142 "CBFC pause frames received for class 3", 143 "CBFC pause frames received for class 4", 144 "CBFC pause frames received for class 5", 145 "CBFC pause frames received for class 6", 146 "CBFC pause frames received for class 7", 147 "CBFC pause frames received for class 8", 148 "CBFC pause frames received for class 9", 149 "CBFC pause frames received for class 10", 150 "CBFC pause frames received for class 11", 151 "CBFC pause frames received for class 12", 152 "CBFC pause frames received for class 13", 153 "CBFC pause frames received for class 14", 154 "CBFC pause frames received for class 15", 155 "MAC control packets received", 156 }; 157 158 static char *rpm_tx_stats_fields[] = { 159 "Total octets sent on the interface", 160 "Total octets transmitted OK", 161 "Control/Pause frames sent", 162 "Total frames transmitted OK", 163 "Total frames sent with VLAN header", 164 "Error Packets", 165 "Packets sent to unicast DMAC", 166 "Packets sent to the multicast DMAC", 167 "Packets sent to a broadcast DMAC", 168 "Packets sent with an octet count == 64", 169 "Packets sent with an octet count of 65-127", 170 "Packets sent with an octet count of 128-255", 171 "Packets sent with an octet count of 256-511", 172 "Packets sent with an octet count of 512-1023", 173 "Packets sent with an octet count of 1024-1518", 174 "Packets sent with an octet count of > 1518", 175 "CBFC(class based flow control) pause frames transmitted for class 0", 176 "CBFC pause frames transmitted for class 1", 177 "CBFC pause frames transmitted for class 2", 178 "CBFC pause frames transmitted for class 3", 179 "CBFC pause frames transmitted for class 4", 180 "CBFC pause frames transmitted for class 5", 181 "CBFC pause frames transmitted for class 6", 182 "CBFC pause frames transmitted for class 7", 183 "CBFC pause frames transmitted for class 8", 184 "CBFC pause frames transmitted for class 9", 185 "CBFC pause frames transmitted for class 10", 186 "CBFC pause frames transmitted for class 11", 187 "CBFC pause frames transmitted for class 12", 188 "CBFC pause frames transmitted for class 13", 189 "CBFC pause frames transmitted for class 14", 190 "CBFC pause frames transmitted for class 15", 191 "MAC control packets sent", 192 "Total frames sent on the interface" 193 }; 194 195 enum cpt_eng_type { 196 CPT_AE_TYPE = 1, 197 CPT_SE_TYPE = 2, 198 CPT_IE_TYPE = 3, 199 }; 200 201 #define rvu_dbg_NULL NULL 202 #define rvu_dbg_open_NULL NULL 203 204 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \ 205 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \ 206 { \ 207 return single_open(file, rvu_dbg_##read_op, inode->i_private); \ 208 } \ 209 static const struct file_operations rvu_dbg_##name##_fops = { \ 210 .owner = THIS_MODULE, \ 211 .open = rvu_dbg_open_##name, \ 212 .read = seq_read, \ 213 .write = rvu_dbg_##write_op, \ 214 .llseek = seq_lseek, \ 215 .release = single_release, \ 216 } 217 218 #define RVU_DEBUG_FOPS(name, read_op, write_op) \ 219 static const struct file_operations rvu_dbg_##name##_fops = { \ 220 .owner = THIS_MODULE, \ 221 .open = simple_open, \ 222 .read = rvu_dbg_##read_op, \ 223 .write = rvu_dbg_##write_op \ 224 } 225 226 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf); 227 228 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir) 229 { 230 struct mcs *mcs = filp->private; 231 struct mcs_port_stats stats; 232 int lmac; 233 234 seq_puts(filp, "\n port stats\n"); 235 mutex_lock(&mcs->stats_lock); 236 for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) { 237 mcs_get_port_stats(mcs, &stats, lmac, dir); 238 seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt); 239 seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt); 240 241 if (dir == MCS_RX && mcs->hw->mcs_blks > 1) 242 seq_printf(filp, "port%d: Preempt error: %lld\n", lmac, 243 stats.preempt_err_cnt); 244 if (dir == MCS_TX) 245 seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac, 246 stats.sectag_insert_err_cnt); 247 } 248 mutex_unlock(&mcs->stats_lock); 249 return 0; 250 } 251 252 static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused) 253 { 254 return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX); 255 } 256 257 RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL); 258 259 static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused) 260 { 261 return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX); 262 } 263 264 RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL); 265 266 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir) 267 { 268 struct mcs *mcs = filp->private; 269 struct mcs_sa_stats stats; 270 struct rsrc_bmap *map; 271 int sa_id; 272 273 if (dir == MCS_TX) { 274 map = &mcs->tx.sa; 275 mutex_lock(&mcs->stats_lock); 276 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) { 277 seq_puts(filp, "\n TX SA stats\n"); 278 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX); 279 seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id, 280 stats.pkt_encrypt_cnt); 281 282 seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id, 283 stats.pkt_protected_cnt); 284 } 285 mutex_unlock(&mcs->stats_lock); 286 return 0; 287 } 288 289 /* RX stats */ 290 map = &mcs->rx.sa; 291 mutex_lock(&mcs->stats_lock); 292 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) { 293 seq_puts(filp, "\n RX SA stats\n"); 294 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX); 295 seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt); 296 seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt); 297 seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt); 298 seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt); 299 seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt); 300 } 301 mutex_unlock(&mcs->stats_lock); 302 return 0; 303 } 304 305 static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused) 306 { 307 return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX); 308 } 309 310 RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL); 311 312 static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused) 313 { 314 return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX); 315 } 316 317 RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL); 318 319 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused) 320 { 321 struct mcs *mcs = filp->private; 322 struct mcs_sc_stats stats; 323 struct rsrc_bmap *map; 324 int sc_id; 325 326 map = &mcs->tx.sc; 327 seq_puts(filp, "\n SC stats\n"); 328 329 mutex_lock(&mcs->stats_lock); 330 for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) { 331 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX); 332 seq_printf(filp, "\n=======sc%d======\n\n", sc_id); 333 seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt); 334 seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt); 335 336 if (mcs->hw->mcs_blks == 1) { 337 seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id, 338 stats.octet_encrypt_cnt); 339 seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id, 340 stats.octet_protected_cnt); 341 } 342 } 343 mutex_unlock(&mcs->stats_lock); 344 return 0; 345 } 346 347 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL); 348 349 static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused) 350 { 351 struct mcs *mcs = filp->private; 352 struct mcs_sc_stats stats; 353 struct rsrc_bmap *map; 354 int sc_id; 355 356 map = &mcs->rx.sc; 357 seq_puts(filp, "\n SC stats\n"); 358 359 mutex_lock(&mcs->stats_lock); 360 for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) { 361 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX); 362 seq_printf(filp, "\n=======sc%d======\n\n", sc_id); 363 seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt); 364 seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt); 365 seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt); 366 seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt); 367 seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt); 368 369 if (mcs->hw->mcs_blks > 1) { 370 seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt); 371 seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt); 372 } 373 if (mcs->hw->mcs_blks == 1) { 374 seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id, 375 stats.octet_decrypt_cnt); 376 seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id, 377 stats.octet_validate_cnt); 378 } 379 } 380 mutex_unlock(&mcs->stats_lock); 381 return 0; 382 } 383 384 RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL); 385 386 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir) 387 { 388 struct mcs *mcs = filp->private; 389 struct mcs_flowid_stats stats; 390 struct rsrc_bmap *map; 391 int flow_id; 392 393 seq_puts(filp, "\n Flowid stats\n"); 394 395 if (dir == MCS_RX) 396 map = &mcs->rx.flow_ids; 397 else 398 map = &mcs->tx.flow_ids; 399 400 mutex_lock(&mcs->stats_lock); 401 for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) { 402 mcs_get_flowid_stats(mcs, &stats, flow_id, dir); 403 seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt); 404 } 405 mutex_unlock(&mcs->stats_lock); 406 return 0; 407 } 408 409 static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused) 410 { 411 return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX); 412 } 413 414 RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL); 415 416 static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused) 417 { 418 return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX); 419 } 420 421 RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL); 422 423 static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused) 424 { 425 struct mcs *mcs = filp->private; 426 struct mcs_secy_stats stats; 427 struct rsrc_bmap *map; 428 int secy_id; 429 430 map = &mcs->tx.secy; 431 seq_puts(filp, "\n MCS TX secy stats\n"); 432 433 mutex_lock(&mcs->stats_lock); 434 for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) { 435 mcs_get_tx_secy_stats(mcs, &stats, secy_id); 436 seq_printf(filp, "\n=======Secy%d======\n\n", secy_id); 437 seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id, 438 stats.ctl_pkt_bcast_cnt); 439 seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id, 440 stats.ctl_pkt_mcast_cnt); 441 seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id, 442 stats.ctl_pkt_ucast_cnt); 443 seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt); 444 seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id, 445 stats.unctl_pkt_bcast_cnt); 446 seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id, 447 stats.unctl_pkt_mcast_cnt); 448 seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id, 449 stats.unctl_pkt_ucast_cnt); 450 seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt); 451 seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id, 452 stats.octet_encrypted_cnt); 453 seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id, 454 stats.octet_protected_cnt); 455 seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id, 456 stats.pkt_noactivesa_cnt); 457 seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt); 458 seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt); 459 } 460 mutex_unlock(&mcs->stats_lock); 461 return 0; 462 } 463 464 RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL); 465 466 static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused) 467 { 468 struct mcs *mcs = filp->private; 469 struct mcs_secy_stats stats; 470 struct rsrc_bmap *map; 471 int secy_id; 472 473 map = &mcs->rx.secy; 474 seq_puts(filp, "\n MCS secy stats\n"); 475 476 mutex_lock(&mcs->stats_lock); 477 for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) { 478 mcs_get_rx_secy_stats(mcs, &stats, secy_id); 479 seq_printf(filp, "\n=======Secy%d======\n\n", secy_id); 480 seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id, 481 stats.ctl_pkt_bcast_cnt); 482 seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id, 483 stats.ctl_pkt_mcast_cnt); 484 seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id, 485 stats.ctl_pkt_ucast_cnt); 486 seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt); 487 seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id, 488 stats.unctl_pkt_bcast_cnt); 489 seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id, 490 stats.unctl_pkt_mcast_cnt); 491 seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id, 492 stats.unctl_pkt_ucast_cnt); 493 seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt); 494 seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id, 495 stats.octet_decrypted_cnt); 496 seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id, 497 stats.octet_validated_cnt); 498 seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id, 499 stats.pkt_port_disabled_cnt); 500 seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt); 501 seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id, 502 stats.pkt_nosa_cnt); 503 seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id, 504 stats.pkt_nosaerror_cnt); 505 seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id, 506 stats.pkt_tagged_ctl_cnt); 507 seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt); 508 seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt); 509 if (mcs->hw->mcs_blks > 1) 510 seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id, 511 stats.pkt_notag_cnt); 512 } 513 mutex_unlock(&mcs->stats_lock); 514 return 0; 515 } 516 517 RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL); 518 519 static void rvu_dbg_mcs_init(struct rvu *rvu) 520 { 521 struct mcs *mcs; 522 char dname[10]; 523 int i; 524 525 if (!rvu->mcs_blk_cnt) 526 return; 527 528 rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root); 529 530 for (i = 0; i < rvu->mcs_blk_cnt; i++) { 531 mcs = mcs_get_pdata(i); 532 533 sprintf(dname, "mcs%d", i); 534 rvu->rvu_dbg.mcs = debugfs_create_dir(dname, 535 rvu->rvu_dbg.mcs_root); 536 537 rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs); 538 539 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs, 540 &rvu_dbg_mcs_rx_flowid_stats_fops); 541 542 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs, 543 &rvu_dbg_mcs_rx_secy_stats_fops); 544 545 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs, 546 &rvu_dbg_mcs_rx_sc_stats_fops); 547 548 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs, 549 &rvu_dbg_mcs_rx_sa_stats_fops); 550 551 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs, 552 &rvu_dbg_mcs_rx_port_stats_fops); 553 554 rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs); 555 556 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs, 557 &rvu_dbg_mcs_tx_flowid_stats_fops); 558 559 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs, 560 &rvu_dbg_mcs_tx_secy_stats_fops); 561 562 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs, 563 &rvu_dbg_mcs_tx_sc_stats_fops); 564 565 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs, 566 &rvu_dbg_mcs_tx_sa_stats_fops); 567 568 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs, 569 &rvu_dbg_mcs_tx_port_stats_fops); 570 } 571 } 572 573 #define LMT_MAPTBL_ENTRY_SIZE 16 574 /* Dump LMTST map table */ 575 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, 576 char __user *buffer, 577 size_t count, loff_t *ppos) 578 { 579 struct rvu *rvu = filp->private_data; 580 u64 lmt_addr, val, tbl_base; 581 int pf, vf, num_vfs, hw_vfs; 582 void __iomem *lmt_map_base; 583 int buf_size = 10240; 584 size_t off = 0; 585 int index = 0; 586 char *buf; 587 int ret; 588 589 /* don't allow partial reads */ 590 if (*ppos != 0) 591 return 0; 592 593 buf = kzalloc(buf_size, GFP_KERNEL); 594 if (!buf) 595 return -ENOMEM; 596 597 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); 598 599 lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); 600 if (!lmt_map_base) { 601 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); 602 kfree(buf); 603 return false; 604 } 605 606 off += scnprintf(&buf[off], buf_size - 1 - off, 607 "\n\t\t\t\t\tLmtst Map Table Entries"); 608 off += scnprintf(&buf[off], buf_size - 1 - off, 609 "\n\t\t\t\t\t======================="); 610 off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t"); 611 off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t"); 612 off += scnprintf(&buf[off], buf_size - 1 - off, 613 "Lmtline Base (word 0)\t\t"); 614 off += scnprintf(&buf[off], buf_size - 1 - off, 615 "Lmt Map Entry (word 1)"); 616 off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); 617 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 618 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", 619 pf); 620 621 index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; 622 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", 623 (tbl_base + index)); 624 lmt_addr = readq(lmt_map_base + index); 625 off += scnprintf(&buf[off], buf_size - 1 - off, 626 " 0x%016llx\t\t", lmt_addr); 627 index += 8; 628 val = readq(lmt_map_base + index); 629 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n", 630 val); 631 /* Reading num of VFs per PF */ 632 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); 633 for (vf = 0; vf < num_vfs; vf++) { 634 index = (pf * rvu->hw->total_vfs * 16) + 635 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); 636 off += scnprintf(&buf[off], buf_size - 1 - off, 637 "PF%d:VF%d \t\t", pf, vf); 638 off += scnprintf(&buf[off], buf_size - 1 - off, 639 " 0x%llx\t\t", (tbl_base + index)); 640 lmt_addr = readq(lmt_map_base + index); 641 off += scnprintf(&buf[off], buf_size - 1 - off, 642 " 0x%016llx\t\t", lmt_addr); 643 index += 8; 644 val = readq(lmt_map_base + index); 645 off += scnprintf(&buf[off], buf_size - 1 - off, 646 " 0x%016llx\n", val); 647 } 648 } 649 off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); 650 651 ret = min(off, count); 652 if (copy_to_user(buffer, buf, ret)) 653 ret = -EFAULT; 654 kfree(buf); 655 656 iounmap(lmt_map_base); 657 if (ret < 0) 658 return ret; 659 660 *ppos = ret; 661 return ret; 662 } 663 664 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); 665 666 static void get_lf_str_list(struct rvu_block block, int pcifunc, 667 char *lfs) 668 { 669 int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max; 670 671 for_each_set_bit(lf, block.lf.bmap, block.lf.max) { 672 if (lf >= block.lf.max) 673 break; 674 675 if (block.fn_map[lf] != pcifunc) 676 continue; 677 678 if (lf == prev_lf + 1) { 679 prev_lf = lf; 680 seq = 1; 681 continue; 682 } 683 684 if (seq) 685 len += sprintf(lfs + len, "-%d,%d", prev_lf, lf); 686 else 687 len += (len ? sprintf(lfs + len, ",%d", lf) : 688 sprintf(lfs + len, "%d", lf)); 689 690 prev_lf = lf; 691 seq = 0; 692 } 693 694 if (seq) 695 len += sprintf(lfs + len, "-%d", prev_lf); 696 697 lfs[len] = '\0'; 698 } 699 700 static int get_max_column_width(struct rvu *rvu) 701 { 702 int index, pf, vf, lf_str_size = 12, buf_size = 256; 703 struct rvu_block block; 704 u16 pcifunc; 705 char *buf; 706 707 buf = kzalloc(buf_size, GFP_KERNEL); 708 if (!buf) 709 return -ENOMEM; 710 711 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 712 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { 713 pcifunc = pf << 10 | vf; 714 if (!pcifunc) 715 continue; 716 717 for (index = 0; index < BLK_COUNT; index++) { 718 block = rvu->hw->block[index]; 719 if (!strlen(block.name)) 720 continue; 721 722 get_lf_str_list(block, pcifunc, buf); 723 if (lf_str_size <= strlen(buf)) 724 lf_str_size = strlen(buf) + 1; 725 } 726 } 727 } 728 729 kfree(buf); 730 return lf_str_size; 731 } 732 733 /* Dumps current provisioning status of all RVU block LFs */ 734 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, 735 char __user *buffer, 736 size_t count, loff_t *ppos) 737 { 738 int index, off = 0, flag = 0, len = 0, i = 0; 739 struct rvu *rvu = filp->private_data; 740 int bytes_not_copied = 0; 741 struct rvu_block block; 742 int pf, vf, pcifunc; 743 int buf_size = 2048; 744 int lf_str_size; 745 char *lfs; 746 char *buf; 747 748 /* don't allow partial reads */ 749 if (*ppos != 0) 750 return 0; 751 752 buf = kzalloc(buf_size, GFP_KERNEL); 753 if (!buf) 754 return -ENOMEM; 755 756 /* Get the maximum width of a column */ 757 lf_str_size = get_max_column_width(rvu); 758 759 lfs = kzalloc(lf_str_size, GFP_KERNEL); 760 if (!lfs) { 761 kfree(buf); 762 return -ENOMEM; 763 } 764 off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, 765 "pcifunc"); 766 for (index = 0; index < BLK_COUNT; index++) 767 if (strlen(rvu->hw->block[index].name)) { 768 off += scnprintf(&buf[off], buf_size - 1 - off, 769 "%-*s", lf_str_size, 770 rvu->hw->block[index].name); 771 } 772 773 off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); 774 bytes_not_copied = copy_to_user(buffer + (i * off), buf, off); 775 if (bytes_not_copied) 776 goto out; 777 778 i++; 779 *ppos += off; 780 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 781 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { 782 off = 0; 783 flag = 0; 784 pcifunc = pf << 10 | vf; 785 if (!pcifunc) 786 continue; 787 788 if (vf) { 789 sprintf(lfs, "PF%d:VF%d", pf, vf - 1); 790 off = scnprintf(&buf[off], 791 buf_size - 1 - off, 792 "%-*s", lf_str_size, lfs); 793 } else { 794 sprintf(lfs, "PF%d", pf); 795 off = scnprintf(&buf[off], 796 buf_size - 1 - off, 797 "%-*s", lf_str_size, lfs); 798 } 799 800 for (index = 0; index < BLK_COUNT; index++) { 801 block = rvu->hw->block[index]; 802 if (!strlen(block.name)) 803 continue; 804 len = 0; 805 lfs[len] = '\0'; 806 get_lf_str_list(block, pcifunc, lfs); 807 if (strlen(lfs)) 808 flag = 1; 809 810 off += scnprintf(&buf[off], buf_size - 1 - off, 811 "%-*s", lf_str_size, lfs); 812 } 813 if (flag) { 814 off += scnprintf(&buf[off], 815 buf_size - 1 - off, "\n"); 816 bytes_not_copied = copy_to_user(buffer + 817 (i * off), 818 buf, off); 819 if (bytes_not_copied) 820 goto out; 821 822 i++; 823 *ppos += off; 824 } 825 } 826 } 827 828 out: 829 kfree(lfs); 830 kfree(buf); 831 if (bytes_not_copied) 832 return -EFAULT; 833 834 return *ppos; 835 } 836 837 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); 838 839 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) 840 { 841 struct rvu *rvu = filp->private; 842 struct pci_dev *pdev = NULL; 843 struct mac_ops *mac_ops; 844 char cgx[10], lmac[10]; 845 struct rvu_pfvf *pfvf; 846 int pf, domain, blkid; 847 u8 cgx_id, lmac_id; 848 u16 pcifunc; 849 850 domain = 2; 851 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); 852 /* There can be no CGX devices at all */ 853 if (!mac_ops) 854 return 0; 855 seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", 856 mac_ops->name); 857 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 858 if (!is_pf_cgxmapped(rvu, pf)) 859 continue; 860 861 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); 862 if (!pdev) 863 continue; 864 865 cgx[0] = 0; 866 lmac[0] = 0; 867 pcifunc = pf << 10; 868 pfvf = rvu_get_pfvf(rvu, pcifunc); 869 870 if (pfvf->nix_blkaddr == BLKADDR_NIX0) 871 blkid = 0; 872 else 873 blkid = 1; 874 875 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, 876 &lmac_id); 877 sprintf(cgx, "%s%d", mac_ops->name, cgx_id); 878 sprintf(lmac, "LMAC%d", lmac_id); 879 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n", 880 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); 881 882 pci_dev_put(pdev); 883 } 884 return 0; 885 } 886 887 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL); 888 889 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf, 890 u16 *pcifunc) 891 { 892 struct rvu_block *block; 893 struct rvu_hwinfo *hw; 894 895 hw = rvu->hw; 896 block = &hw->block[blkaddr]; 897 898 if (lf < 0 || lf >= block->lf.max) { 899 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n", 900 block->lf.max - 1); 901 return false; 902 } 903 904 *pcifunc = block->fn_map[lf]; 905 if (!*pcifunc) { 906 dev_warn(rvu->dev, 907 "This LF is not attached to any RVU PFFUNC\n"); 908 return false; 909 } 910 return true; 911 } 912 913 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf) 914 { 915 char *buf; 916 917 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 918 if (!buf) 919 return; 920 921 if (!pfvf->aura_ctx) { 922 seq_puts(m, "Aura context is not initialized\n"); 923 } else { 924 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap, 925 pfvf->aura_ctx->qsize); 926 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize); 927 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf); 928 } 929 930 if (!pfvf->pool_ctx) { 931 seq_puts(m, "Pool context is not initialized\n"); 932 } else { 933 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap, 934 pfvf->pool_ctx->qsize); 935 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize); 936 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf); 937 } 938 kfree(buf); 939 } 940 941 /* The 'qsize' entry dumps current Aura/Pool context Qsize 942 * and each context's current enable/disable status in a bitmap. 943 */ 944 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, 945 int blktype) 946 { 947 void (*print_qsize)(struct seq_file *filp, 948 struct rvu_pfvf *pfvf) = NULL; 949 struct dentry *current_dir; 950 struct rvu_pfvf *pfvf; 951 struct rvu *rvu; 952 int qsize_id; 953 u16 pcifunc; 954 int blkaddr; 955 956 rvu = filp->private; 957 switch (blktype) { 958 case BLKTYPE_NPA: 959 qsize_id = rvu->rvu_dbg.npa_qsize_id; 960 print_qsize = print_npa_qsize; 961 break; 962 963 case BLKTYPE_NIX: 964 qsize_id = rvu->rvu_dbg.nix_qsize_id; 965 print_qsize = print_nix_qsize; 966 break; 967 968 default: 969 return -EINVAL; 970 } 971 972 if (blktype == BLKTYPE_NPA) { 973 blkaddr = BLKADDR_NPA; 974 } else { 975 current_dir = filp->file->f_path.dentry->d_parent; 976 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? 977 BLKADDR_NIX1 : BLKADDR_NIX0); 978 } 979 980 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc)) 981 return -EINVAL; 982 983 pfvf = rvu_get_pfvf(rvu, pcifunc); 984 print_qsize(filp, pfvf); 985 986 return 0; 987 } 988 989 static ssize_t rvu_dbg_qsize_write(struct file *filp, 990 const char __user *buffer, size_t count, 991 loff_t *ppos, int blktype) 992 { 993 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix"; 994 struct seq_file *seqfile = filp->private_data; 995 char *cmd_buf, *cmd_buf_tmp, *subtoken; 996 struct rvu *rvu = seqfile->private; 997 struct dentry *current_dir; 998 int blkaddr; 999 u16 pcifunc; 1000 int ret, lf; 1001 1002 cmd_buf = memdup_user_nul(buffer, count); 1003 if (IS_ERR(cmd_buf)) 1004 return -ENOMEM; 1005 1006 cmd_buf_tmp = strchr(cmd_buf, '\n'); 1007 if (cmd_buf_tmp) { 1008 *cmd_buf_tmp = '\0'; 1009 count = cmd_buf_tmp - cmd_buf + 1; 1010 } 1011 1012 cmd_buf_tmp = cmd_buf; 1013 subtoken = strsep(&cmd_buf, " "); 1014 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL; 1015 if (cmd_buf) 1016 ret = -EINVAL; 1017 1018 if (ret < 0 || !strncmp(subtoken, "help", 4)) { 1019 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string); 1020 goto qsize_write_done; 1021 } 1022 1023 if (blktype == BLKTYPE_NPA) { 1024 blkaddr = BLKADDR_NPA; 1025 } else { 1026 current_dir = filp->f_path.dentry->d_parent; 1027 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? 1028 BLKADDR_NIX1 : BLKADDR_NIX0); 1029 } 1030 1031 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) { 1032 ret = -EINVAL; 1033 goto qsize_write_done; 1034 } 1035 if (blktype == BLKTYPE_NPA) 1036 rvu->rvu_dbg.npa_qsize_id = lf; 1037 else 1038 rvu->rvu_dbg.nix_qsize_id = lf; 1039 1040 qsize_write_done: 1041 kfree(cmd_buf_tmp); 1042 return ret ? ret : count; 1043 } 1044 1045 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp, 1046 const char __user *buffer, 1047 size_t count, loff_t *ppos) 1048 { 1049 return rvu_dbg_qsize_write(filp, buffer, count, ppos, 1050 BLKTYPE_NPA); 1051 } 1052 1053 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused) 1054 { 1055 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA); 1056 } 1057 1058 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write); 1059 1060 /* Dumps given NPA Aura's context */ 1061 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) 1062 { 1063 struct npa_aura_s *aura = &rsp->aura; 1064 struct rvu *rvu = m->private; 1065 1066 seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); 1067 1068 seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", 1069 aura->ena, aura->pool_caching); 1070 seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n", 1071 aura->pool_way_mask, aura->avg_con); 1072 seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n", 1073 aura->pool_drop_ena, aura->aura_drop_ena); 1074 seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n", 1075 aura->bp_ena, aura->aura_drop); 1076 seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n", 1077 aura->shift, aura->avg_level); 1078 1079 seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n", 1080 (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid); 1081 1082 seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", 1083 (u64)aura->limit, aura->bp, aura->fc_ena); 1084 1085 if (!is_rvu_otx2(rvu)) 1086 seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be); 1087 seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", 1088 aura->fc_up_crossing, aura->fc_stype); 1089 seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); 1090 1091 seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr); 1092 1093 seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n", 1094 aura->pool_drop, aura->update_time); 1095 seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n", 1096 aura->err_int, aura->err_int_ena); 1097 seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n", 1098 aura->thresh_int, aura->thresh_int_ena); 1099 seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n", 1100 aura->thresh_up, aura->thresh_qint_idx); 1101 seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); 1102 1103 seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); 1104 if (!is_rvu_otx2(rvu)) 1105 seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst); 1106 } 1107 1108 /* Dumps given NPA Pool's context */ 1109 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) 1110 { 1111 struct npa_pool_s *pool = &rsp->pool; 1112 struct rvu *rvu = m->private; 1113 1114 seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); 1115 1116 seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", 1117 pool->ena, pool->nat_align); 1118 seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n", 1119 pool->stack_caching, pool->stack_way_mask); 1120 seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n", 1121 pool->buf_offset, pool->buf_size); 1122 1123 seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n", 1124 pool->stack_max_pages, pool->stack_pages); 1125 1126 seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc); 1127 1128 seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n", 1129 pool->stack_offset, pool->shift, pool->avg_level); 1130 seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n", 1131 pool->avg_con, pool->fc_ena, pool->fc_stype); 1132 seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", 1133 pool->fc_hyst_bits, pool->fc_up_crossing); 1134 if (!is_rvu_otx2(rvu)) 1135 seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be); 1136 seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); 1137 1138 seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); 1139 1140 seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start); 1141 1142 seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end); 1143 1144 seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n", 1145 pool->err_int, pool->err_int_ena); 1146 seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); 1147 seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", 1148 pool->thresh_int_ena, pool->thresh_up); 1149 seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n", 1150 pool->thresh_qint_idx, pool->err_qint_idx); 1151 if (!is_rvu_otx2(rvu)) 1152 seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst); 1153 } 1154 1155 /* Reads aura/pool's ctx from admin queue */ 1156 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype) 1157 { 1158 void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp); 1159 struct npa_aq_enq_req aq_req; 1160 struct npa_aq_enq_rsp rsp; 1161 struct rvu_pfvf *pfvf; 1162 int aura, rc, max_id; 1163 int npalf, id, all; 1164 struct rvu *rvu; 1165 u16 pcifunc; 1166 1167 rvu = m->private; 1168 1169 switch (ctype) { 1170 case NPA_AQ_CTYPE_AURA: 1171 npalf = rvu->rvu_dbg.npa_aura_ctx.lf; 1172 id = rvu->rvu_dbg.npa_aura_ctx.id; 1173 all = rvu->rvu_dbg.npa_aura_ctx.all; 1174 break; 1175 1176 case NPA_AQ_CTYPE_POOL: 1177 npalf = rvu->rvu_dbg.npa_pool_ctx.lf; 1178 id = rvu->rvu_dbg.npa_pool_ctx.id; 1179 all = rvu->rvu_dbg.npa_pool_ctx.all; 1180 break; 1181 default: 1182 return -EINVAL; 1183 } 1184 1185 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc)) 1186 return -EINVAL; 1187 1188 pfvf = rvu_get_pfvf(rvu, pcifunc); 1189 if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) { 1190 seq_puts(m, "Aura context is not initialized\n"); 1191 return -EINVAL; 1192 } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) { 1193 seq_puts(m, "Pool context is not initialized\n"); 1194 return -EINVAL; 1195 } 1196 1197 memset(&aq_req, 0, sizeof(struct npa_aq_enq_req)); 1198 aq_req.hdr.pcifunc = pcifunc; 1199 aq_req.ctype = ctype; 1200 aq_req.op = NPA_AQ_INSTOP_READ; 1201 if (ctype == NPA_AQ_CTYPE_AURA) { 1202 max_id = pfvf->aura_ctx->qsize; 1203 print_npa_ctx = print_npa_aura_ctx; 1204 } else { 1205 max_id = pfvf->pool_ctx->qsize; 1206 print_npa_ctx = print_npa_pool_ctx; 1207 } 1208 1209 if (id < 0 || id >= max_id) { 1210 seq_printf(m, "Invalid %s, valid range is 0-%d\n", 1211 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", 1212 max_id - 1); 1213 return -EINVAL; 1214 } 1215 1216 if (all) 1217 id = 0; 1218 else 1219 max_id = id + 1; 1220 1221 for (aura = id; aura < max_id; aura++) { 1222 aq_req.aura_id = aura; 1223 1224 /* Skip if queue is uninitialized */ 1225 if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap)) 1226 continue; 1227 1228 seq_printf(m, "======%s : %d=======\n", 1229 (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL", 1230 aq_req.aura_id); 1231 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp); 1232 if (rc) { 1233 seq_puts(m, "Failed to read context\n"); 1234 return -EINVAL; 1235 } 1236 print_npa_ctx(m, &rsp); 1237 } 1238 return 0; 1239 } 1240 1241 static int write_npa_ctx(struct rvu *rvu, bool all, 1242 int npalf, int id, int ctype) 1243 { 1244 struct rvu_pfvf *pfvf; 1245 int max_id = 0; 1246 u16 pcifunc; 1247 1248 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc)) 1249 return -EINVAL; 1250 1251 pfvf = rvu_get_pfvf(rvu, pcifunc); 1252 1253 if (ctype == NPA_AQ_CTYPE_AURA) { 1254 if (!pfvf->aura_ctx) { 1255 dev_warn(rvu->dev, "Aura context is not initialized\n"); 1256 return -EINVAL; 1257 } 1258 max_id = pfvf->aura_ctx->qsize; 1259 } else if (ctype == NPA_AQ_CTYPE_POOL) { 1260 if (!pfvf->pool_ctx) { 1261 dev_warn(rvu->dev, "Pool context is not initialized\n"); 1262 return -EINVAL; 1263 } 1264 max_id = pfvf->pool_ctx->qsize; 1265 } 1266 1267 if (id < 0 || id >= max_id) { 1268 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n", 1269 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", 1270 max_id - 1); 1271 return -EINVAL; 1272 } 1273 1274 switch (ctype) { 1275 case NPA_AQ_CTYPE_AURA: 1276 rvu->rvu_dbg.npa_aura_ctx.lf = npalf; 1277 rvu->rvu_dbg.npa_aura_ctx.id = id; 1278 rvu->rvu_dbg.npa_aura_ctx.all = all; 1279 break; 1280 1281 case NPA_AQ_CTYPE_POOL: 1282 rvu->rvu_dbg.npa_pool_ctx.lf = npalf; 1283 rvu->rvu_dbg.npa_pool_ctx.id = id; 1284 rvu->rvu_dbg.npa_pool_ctx.all = all; 1285 break; 1286 default: 1287 return -EINVAL; 1288 } 1289 return 0; 1290 } 1291 1292 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count, 1293 const char __user *buffer, int *npalf, 1294 int *id, bool *all) 1295 { 1296 int bytes_not_copied; 1297 char *cmd_buf_tmp; 1298 char *subtoken; 1299 int ret; 1300 1301 bytes_not_copied = copy_from_user(cmd_buf, buffer, *count); 1302 if (bytes_not_copied) 1303 return -EFAULT; 1304 1305 cmd_buf[*count] = '\0'; 1306 cmd_buf_tmp = strchr(cmd_buf, '\n'); 1307 1308 if (cmd_buf_tmp) { 1309 *cmd_buf_tmp = '\0'; 1310 *count = cmd_buf_tmp - cmd_buf + 1; 1311 } 1312 1313 subtoken = strsep(&cmd_buf, " "); 1314 ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL; 1315 if (ret < 0) 1316 return ret; 1317 subtoken = strsep(&cmd_buf, " "); 1318 if (subtoken && strcmp(subtoken, "all") == 0) { 1319 *all = true; 1320 } else { 1321 ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL; 1322 if (ret < 0) 1323 return ret; 1324 } 1325 if (cmd_buf) 1326 return -EINVAL; 1327 return ret; 1328 } 1329 1330 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp, 1331 const char __user *buffer, 1332 size_t count, loff_t *ppos, int ctype) 1333 { 1334 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ? 1335 "aura" : "pool"; 1336 struct seq_file *seqfp = filp->private_data; 1337 struct rvu *rvu = seqfp->private; 1338 int npalf, id = 0, ret; 1339 bool all = false; 1340 1341 if ((*ppos != 0) || !count) 1342 return -EINVAL; 1343 1344 cmd_buf = kzalloc(count + 1, GFP_KERNEL); 1345 if (!cmd_buf) 1346 return count; 1347 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, 1348 &npalf, &id, &all); 1349 if (ret < 0) { 1350 dev_info(rvu->dev, 1351 "Usage: echo <npalf> [%s number/all] > %s_ctx\n", 1352 ctype_string, ctype_string); 1353 goto done; 1354 } else { 1355 ret = write_npa_ctx(rvu, all, npalf, id, ctype); 1356 } 1357 done: 1358 kfree(cmd_buf); 1359 return ret ? ret : count; 1360 } 1361 1362 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp, 1363 const char __user *buffer, 1364 size_t count, loff_t *ppos) 1365 { 1366 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos, 1367 NPA_AQ_CTYPE_AURA); 1368 } 1369 1370 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused) 1371 { 1372 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA); 1373 } 1374 1375 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write); 1376 1377 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp, 1378 const char __user *buffer, 1379 size_t count, loff_t *ppos) 1380 { 1381 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos, 1382 NPA_AQ_CTYPE_POOL); 1383 } 1384 1385 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused) 1386 { 1387 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL); 1388 } 1389 1390 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write); 1391 1392 static void ndc_cache_stats(struct seq_file *s, int blk_addr, 1393 int ctype, int transaction) 1394 { 1395 u64 req, out_req, lat, cant_alloc; 1396 struct nix_hw *nix_hw; 1397 struct rvu *rvu; 1398 int port; 1399 1400 if (blk_addr == BLKADDR_NDC_NPA0) { 1401 rvu = s->private; 1402 } else { 1403 nix_hw = s->private; 1404 rvu = nix_hw->rvu; 1405 } 1406 1407 for (port = 0; port < NDC_MAX_PORT; port++) { 1408 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC 1409 (port, ctype, transaction)); 1410 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC 1411 (port, ctype, transaction)); 1412 out_req = rvu_read64(rvu, blk_addr, 1413 NDC_AF_PORTX_RTX_RWX_OSTDN_PC 1414 (port, ctype, transaction)); 1415 cant_alloc = rvu_read64(rvu, blk_addr, 1416 NDC_AF_PORTX_RTX_CANT_ALLOC_PC 1417 (port, transaction)); 1418 seq_printf(s, "\nPort:%d\n", port); 1419 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req); 1420 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat); 1421 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req); 1422 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req); 1423 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc); 1424 } 1425 } 1426 1427 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr) 1428 { 1429 seq_puts(s, "\n***** CACHE mode read stats *****\n"); 1430 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS); 1431 seq_puts(s, "\n***** CACHE mode write stats *****\n"); 1432 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS); 1433 seq_puts(s, "\n***** BY-PASS mode read stats *****\n"); 1434 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS); 1435 seq_puts(s, "\n***** BY-PASS mode write stats *****\n"); 1436 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS); 1437 return 0; 1438 } 1439 1440 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused) 1441 { 1442 return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); 1443 } 1444 1445 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL); 1446 1447 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr) 1448 { 1449 struct nix_hw *nix_hw; 1450 struct rvu *rvu; 1451 int bank, max_bank; 1452 u64 ndc_af_const; 1453 1454 if (blk_addr == BLKADDR_NDC_NPA0) { 1455 rvu = s->private; 1456 } else { 1457 nix_hw = s->private; 1458 rvu = nix_hw->rvu; 1459 } 1460 1461 ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST); 1462 max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const); 1463 for (bank = 0; bank < max_bank; bank++) { 1464 seq_printf(s, "BANK:%d\n", bank); 1465 seq_printf(s, "\tHits:\t%lld\n", 1466 (u64)rvu_read64(rvu, blk_addr, 1467 NDC_AF_BANKX_HIT_PC(bank))); 1468 seq_printf(s, "\tMiss:\t%lld\n", 1469 (u64)rvu_read64(rvu, blk_addr, 1470 NDC_AF_BANKX_MISS_PC(bank))); 1471 } 1472 return 0; 1473 } 1474 1475 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused) 1476 { 1477 struct nix_hw *nix_hw = filp->private; 1478 int blkaddr = 0; 1479 int ndc_idx = 0; 1480 1481 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 1482 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX); 1483 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX); 1484 1485 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr); 1486 } 1487 1488 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL); 1489 1490 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused) 1491 { 1492 struct nix_hw *nix_hw = filp->private; 1493 int blkaddr = 0; 1494 int ndc_idx = 0; 1495 1496 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 1497 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX); 1498 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX); 1499 1500 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr); 1501 } 1502 1503 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL); 1504 1505 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp, 1506 void *unused) 1507 { 1508 return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); 1509 } 1510 1511 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL); 1512 1513 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp, 1514 void *unused) 1515 { 1516 struct nix_hw *nix_hw = filp->private; 1517 int ndc_idx = NPA0_U; 1518 int blkaddr = 0; 1519 1520 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 1521 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX); 1522 1523 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr); 1524 } 1525 1526 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL); 1527 1528 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp, 1529 void *unused) 1530 { 1531 struct nix_hw *nix_hw = filp->private; 1532 int ndc_idx = NPA0_U; 1533 int blkaddr = 0; 1534 1535 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 1536 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX); 1537 1538 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr); 1539 } 1540 1541 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL); 1542 1543 static void print_nix_cn10k_sq_ctx(struct seq_file *m, 1544 struct nix_cn10k_sq_ctx_s *sq_ctx) 1545 { 1546 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n", 1547 sq_ctx->ena, sq_ctx->qint_idx); 1548 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n", 1549 sq_ctx->substream, sq_ctx->sdp_mcast); 1550 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n", 1551 sq_ctx->cq, sq_ctx->sqe_way_mask); 1552 1553 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n", 1554 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff); 1555 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n", 1556 sq_ctx->sso_ena, sq_ctx->smq_rr_weight); 1557 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n", 1558 sq_ctx->default_chan, sq_ctx->sqb_count); 1559 1560 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb); 1561 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub); 1562 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n", 1563 sq_ctx->sqb_aura, sq_ctx->sq_int); 1564 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n", 1565 sq_ctx->sq_int_ena, sq_ctx->sqe_stype); 1566 1567 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n", 1568 sq_ctx->max_sqe_size, sq_ctx->cq_limit); 1569 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n", 1570 sq_ctx->mnq_dis, sq_ctx->lmt_dis); 1571 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n", 1572 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum); 1573 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n", 1574 sq_ctx->tail_offset, sq_ctx->smenq_offset); 1575 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n", 1576 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld); 1577 1578 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", 1579 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); 1580 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); 1581 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); 1582 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); 1583 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", 1584 sq_ctx->smenq_next_sqb); 1585 1586 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); 1587 1588 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total); 1589 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n", 1590 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb); 1591 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n", 1592 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena); 1593 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n", 1594 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); 1595 1596 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", 1597 (u64)sq_ctx->scm_lso_rem); 1598 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); 1599 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); 1600 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", 1601 (u64)sq_ctx->dropped_octs); 1602 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", 1603 (u64)sq_ctx->dropped_pkts); 1604 } 1605 1606 /* Dumps given nix_sq's context */ 1607 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) 1608 { 1609 struct nix_sq_ctx_s *sq_ctx = &rsp->sq; 1610 struct nix_hw *nix_hw = m->private; 1611 struct rvu *rvu = nix_hw->rvu; 1612 1613 if (!is_rvu_otx2(rvu)) { 1614 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); 1615 return; 1616 } 1617 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", 1618 sq_ctx->sqe_way_mask, sq_ctx->cq); 1619 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", 1620 sq_ctx->sdp_mcast, sq_ctx->substream); 1621 seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n", 1622 sq_ctx->qint_idx, sq_ctx->ena); 1623 1624 seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n", 1625 sq_ctx->sqb_count, sq_ctx->default_chan); 1626 seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n", 1627 sq_ctx->smq_rr_quantum, sq_ctx->sso_ena); 1628 seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n", 1629 sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq); 1630 1631 seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n", 1632 sq_ctx->sqe_stype, sq_ctx->sq_int_ena); 1633 seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n", 1634 sq_ctx->sq_int, sq_ctx->sqb_aura); 1635 seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count); 1636 1637 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", 1638 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); 1639 seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n", 1640 sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset); 1641 seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n", 1642 sq_ctx->smenq_offset, sq_ctx->tail_offset); 1643 seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n", 1644 sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq); 1645 seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n", 1646 sq_ctx->mnq_dis, sq_ctx->lmt_dis); 1647 seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n", 1648 sq_ctx->cq_limit, sq_ctx->max_sqe_size); 1649 1650 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); 1651 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); 1652 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); 1653 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", 1654 sq_ctx->smenq_next_sqb); 1655 1656 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); 1657 1658 seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n", 1659 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); 1660 seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n", 1661 sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps); 1662 seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n", 1663 sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1); 1664 seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total); 1665 1666 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", 1667 (u64)sq_ctx->scm_lso_rem); 1668 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); 1669 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); 1670 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", 1671 (u64)sq_ctx->dropped_octs); 1672 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", 1673 (u64)sq_ctx->dropped_pkts); 1674 } 1675 1676 static void print_nix_cn10k_rq_ctx(struct seq_file *m, 1677 struct nix_cn10k_rq_ctx_s *rq_ctx) 1678 { 1679 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", 1680 rq_ctx->ena, rq_ctx->sso_ena); 1681 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", 1682 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd); 1683 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n", 1684 rq_ctx->cq, rq_ctx->lenerr_dis); 1685 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n", 1686 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis); 1687 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n", 1688 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis); 1689 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n", 1690 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis); 1691 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura); 1692 1693 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", 1694 rq_ctx->spb_aura, rq_ctx->lpb_aura); 1695 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura); 1696 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n", 1697 rq_ctx->sso_grp, rq_ctx->sso_tt); 1698 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n", 1699 rq_ctx->pb_caching, rq_ctx->wqe_caching); 1700 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", 1701 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena); 1702 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n", 1703 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing); 1704 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n", 1705 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena); 1706 1707 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id); 1708 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena); 1709 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1); 1710 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n", 1711 rq_ctx->wqe_skip, rq_ctx->spb_ena); 1712 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n", 1713 rq_ctx->lpb_sizem1, rq_ctx->first_skip); 1714 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n", 1715 rq_ctx->later_skip, rq_ctx->xqe_imm_size); 1716 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n", 1717 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split); 1718 1719 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n", 1720 rq_ctx->xqe_drop, rq_ctx->xqe_pass); 1721 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n", 1722 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass); 1723 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n", 1724 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass); 1725 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n", 1726 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); 1727 1728 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n", 1729 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop); 1730 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n", 1731 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass); 1732 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n", 1733 rq_ctx->rq_int, rq_ctx->rq_int_ena); 1734 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx); 1735 1736 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n", 1737 rq_ctx->ltag, rq_ctx->good_utag); 1738 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n", 1739 rq_ctx->bad_utag, rq_ctx->flow_tagw); 1740 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n", 1741 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena); 1742 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n", 1743 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp); 1744 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip); 1745 1746 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); 1747 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); 1748 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); 1749 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); 1750 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); 1751 } 1752 1753 /* Dumps given nix_rq's context */ 1754 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) 1755 { 1756 struct nix_rq_ctx_s *rq_ctx = &rsp->rq; 1757 struct nix_hw *nix_hw = m->private; 1758 struct rvu *rvu = nix_hw->rvu; 1759 1760 if (!is_rvu_otx2(rvu)) { 1761 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx); 1762 return; 1763 } 1764 1765 seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n", 1766 rq_ctx->wqe_aura, rq_ctx->substream); 1767 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", 1768 rq_ctx->cq, rq_ctx->ena_wqwd); 1769 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", 1770 rq_ctx->ipsech_ena, rq_ctx->sso_ena); 1771 seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena); 1772 1773 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", 1774 rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena); 1775 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n", 1776 rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching); 1777 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n", 1778 rq_ctx->pb_caching, rq_ctx->sso_tt); 1779 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", 1780 rq_ctx->sso_grp, rq_ctx->lpb_aura); 1781 seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura); 1782 1783 seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n", 1784 rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy); 1785 seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n", 1786 rq_ctx->xqe_imm_size, rq_ctx->later_skip); 1787 seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n", 1788 rq_ctx->first_skip, rq_ctx->lpb_sizem1); 1789 seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n", 1790 rq_ctx->spb_ena, rq_ctx->wqe_skip); 1791 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1); 1792 1793 seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n", 1794 rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop); 1795 seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n", 1796 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); 1797 seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n", 1798 rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop); 1799 seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n", 1800 rq_ctx->xqe_pass, rq_ctx->xqe_drop); 1801 1802 seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n", 1803 rq_ctx->qint_idx, rq_ctx->rq_int_ena); 1804 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n", 1805 rq_ctx->rq_int, rq_ctx->lpb_pool_pass); 1806 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n", 1807 rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass); 1808 seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop); 1809 1810 seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n", 1811 rq_ctx->flow_tagw, rq_ctx->bad_utag); 1812 seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n", 1813 rq_ctx->good_utag, rq_ctx->ltag); 1814 1815 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); 1816 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); 1817 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); 1818 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); 1819 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); 1820 } 1821 1822 /* Dumps given nix_cq's context */ 1823 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) 1824 { 1825 struct nix_cq_ctx_s *cq_ctx = &rsp->cq; 1826 1827 seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); 1828 1829 seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); 1830 seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n", 1831 cq_ctx->avg_con, cq_ctx->cint_idx); 1832 seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n", 1833 cq_ctx->cq_err, cq_ctx->qint_idx); 1834 seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", 1835 cq_ctx->bpid, cq_ctx->bp_ena); 1836 1837 seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n", 1838 cq_ctx->update_time, cq_ctx->avg_level); 1839 seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n", 1840 cq_ctx->head, cq_ctx->tail); 1841 1842 seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n", 1843 cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); 1844 seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n", 1845 cq_ctx->qsize, cq_ctx->caching); 1846 seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", 1847 cq_ctx->substream, cq_ctx->ena); 1848 seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", 1849 cq_ctx->drop_ena, cq_ctx->drop); 1850 seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); 1851 } 1852 1853 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp, 1854 void *unused, int ctype) 1855 { 1856 void (*print_nix_ctx)(struct seq_file *filp, 1857 struct nix_aq_enq_rsp *rsp) = NULL; 1858 struct nix_hw *nix_hw = filp->private; 1859 struct rvu *rvu = nix_hw->rvu; 1860 struct nix_aq_enq_req aq_req; 1861 struct nix_aq_enq_rsp rsp; 1862 char *ctype_string = NULL; 1863 int qidx, rc, max_id = 0; 1864 struct rvu_pfvf *pfvf; 1865 int nixlf, id, all; 1866 u16 pcifunc; 1867 1868 switch (ctype) { 1869 case NIX_AQ_CTYPE_CQ: 1870 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf; 1871 id = rvu->rvu_dbg.nix_cq_ctx.id; 1872 all = rvu->rvu_dbg.nix_cq_ctx.all; 1873 break; 1874 1875 case NIX_AQ_CTYPE_SQ: 1876 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf; 1877 id = rvu->rvu_dbg.nix_sq_ctx.id; 1878 all = rvu->rvu_dbg.nix_sq_ctx.all; 1879 break; 1880 1881 case NIX_AQ_CTYPE_RQ: 1882 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf; 1883 id = rvu->rvu_dbg.nix_rq_ctx.id; 1884 all = rvu->rvu_dbg.nix_rq_ctx.all; 1885 break; 1886 1887 default: 1888 return -EINVAL; 1889 } 1890 1891 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) 1892 return -EINVAL; 1893 1894 pfvf = rvu_get_pfvf(rvu, pcifunc); 1895 if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) { 1896 seq_puts(filp, "SQ context is not initialized\n"); 1897 return -EINVAL; 1898 } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) { 1899 seq_puts(filp, "RQ context is not initialized\n"); 1900 return -EINVAL; 1901 } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) { 1902 seq_puts(filp, "CQ context is not initialized\n"); 1903 return -EINVAL; 1904 } 1905 1906 if (ctype == NIX_AQ_CTYPE_SQ) { 1907 max_id = pfvf->sq_ctx->qsize; 1908 ctype_string = "sq"; 1909 print_nix_ctx = print_nix_sq_ctx; 1910 } else if (ctype == NIX_AQ_CTYPE_RQ) { 1911 max_id = pfvf->rq_ctx->qsize; 1912 ctype_string = "rq"; 1913 print_nix_ctx = print_nix_rq_ctx; 1914 } else if (ctype == NIX_AQ_CTYPE_CQ) { 1915 max_id = pfvf->cq_ctx->qsize; 1916 ctype_string = "cq"; 1917 print_nix_ctx = print_nix_cq_ctx; 1918 } 1919 1920 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 1921 aq_req.hdr.pcifunc = pcifunc; 1922 aq_req.ctype = ctype; 1923 aq_req.op = NIX_AQ_INSTOP_READ; 1924 if (all) 1925 id = 0; 1926 else 1927 max_id = id + 1; 1928 for (qidx = id; qidx < max_id; qidx++) { 1929 aq_req.qidx = qidx; 1930 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n", 1931 ctype_string, nixlf, aq_req.qidx); 1932 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp); 1933 if (rc) { 1934 seq_puts(filp, "Failed to read the context\n"); 1935 return -EINVAL; 1936 } 1937 print_nix_ctx(filp, &rsp); 1938 } 1939 return 0; 1940 } 1941 1942 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf, 1943 int id, int ctype, char *ctype_string, 1944 struct seq_file *m) 1945 { 1946 struct nix_hw *nix_hw = m->private; 1947 struct rvu_pfvf *pfvf; 1948 int max_id = 0; 1949 u16 pcifunc; 1950 1951 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) 1952 return -EINVAL; 1953 1954 pfvf = rvu_get_pfvf(rvu, pcifunc); 1955 1956 if (ctype == NIX_AQ_CTYPE_SQ) { 1957 if (!pfvf->sq_ctx) { 1958 dev_warn(rvu->dev, "SQ context is not initialized\n"); 1959 return -EINVAL; 1960 } 1961 max_id = pfvf->sq_ctx->qsize; 1962 } else if (ctype == NIX_AQ_CTYPE_RQ) { 1963 if (!pfvf->rq_ctx) { 1964 dev_warn(rvu->dev, "RQ context is not initialized\n"); 1965 return -EINVAL; 1966 } 1967 max_id = pfvf->rq_ctx->qsize; 1968 } else if (ctype == NIX_AQ_CTYPE_CQ) { 1969 if (!pfvf->cq_ctx) { 1970 dev_warn(rvu->dev, "CQ context is not initialized\n"); 1971 return -EINVAL; 1972 } 1973 max_id = pfvf->cq_ctx->qsize; 1974 } 1975 1976 if (id < 0 || id >= max_id) { 1977 dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n", 1978 ctype_string, max_id - 1); 1979 return -EINVAL; 1980 } 1981 switch (ctype) { 1982 case NIX_AQ_CTYPE_CQ: 1983 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf; 1984 rvu->rvu_dbg.nix_cq_ctx.id = id; 1985 rvu->rvu_dbg.nix_cq_ctx.all = all; 1986 break; 1987 1988 case NIX_AQ_CTYPE_SQ: 1989 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf; 1990 rvu->rvu_dbg.nix_sq_ctx.id = id; 1991 rvu->rvu_dbg.nix_sq_ctx.all = all; 1992 break; 1993 1994 case NIX_AQ_CTYPE_RQ: 1995 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf; 1996 rvu->rvu_dbg.nix_rq_ctx.id = id; 1997 rvu->rvu_dbg.nix_rq_ctx.all = all; 1998 break; 1999 default: 2000 return -EINVAL; 2001 } 2002 return 0; 2003 } 2004 2005 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp, 2006 const char __user *buffer, 2007 size_t count, loff_t *ppos, 2008 int ctype) 2009 { 2010 struct seq_file *m = filp->private_data; 2011 struct nix_hw *nix_hw = m->private; 2012 struct rvu *rvu = nix_hw->rvu; 2013 char *cmd_buf, *ctype_string; 2014 int nixlf, id = 0, ret; 2015 bool all = false; 2016 2017 if ((*ppos != 0) || !count) 2018 return -EINVAL; 2019 2020 switch (ctype) { 2021 case NIX_AQ_CTYPE_SQ: 2022 ctype_string = "sq"; 2023 break; 2024 case NIX_AQ_CTYPE_RQ: 2025 ctype_string = "rq"; 2026 break; 2027 case NIX_AQ_CTYPE_CQ: 2028 ctype_string = "cq"; 2029 break; 2030 default: 2031 return -EINVAL; 2032 } 2033 2034 cmd_buf = kzalloc(count + 1, GFP_KERNEL); 2035 2036 if (!cmd_buf) 2037 return count; 2038 2039 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, 2040 &nixlf, &id, &all); 2041 if (ret < 0) { 2042 dev_info(rvu->dev, 2043 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n", 2044 ctype_string, ctype_string); 2045 goto done; 2046 } else { 2047 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype, 2048 ctype_string, m); 2049 } 2050 done: 2051 kfree(cmd_buf); 2052 return ret ? ret : count; 2053 } 2054 2055 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp, 2056 const char __user *buffer, 2057 size_t count, loff_t *ppos) 2058 { 2059 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, 2060 NIX_AQ_CTYPE_SQ); 2061 } 2062 2063 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused) 2064 { 2065 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ); 2066 } 2067 2068 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write); 2069 2070 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp, 2071 const char __user *buffer, 2072 size_t count, loff_t *ppos) 2073 { 2074 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, 2075 NIX_AQ_CTYPE_RQ); 2076 } 2077 2078 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused) 2079 { 2080 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ); 2081 } 2082 2083 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write); 2084 2085 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp, 2086 const char __user *buffer, 2087 size_t count, loff_t *ppos) 2088 { 2089 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, 2090 NIX_AQ_CTYPE_CQ); 2091 } 2092 2093 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused) 2094 { 2095 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ); 2096 } 2097 2098 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write); 2099 2100 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize, 2101 unsigned long *bmap, char *qtype) 2102 { 2103 char *buf; 2104 2105 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 2106 if (!buf) 2107 return; 2108 2109 bitmap_print_to_pagebuf(false, buf, bmap, qsize); 2110 seq_printf(filp, "%s context count : %d\n", qtype, qsize); 2111 seq_printf(filp, "%s context ena/dis bitmap : %s\n", 2112 qtype, buf); 2113 kfree(buf); 2114 } 2115 2116 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf) 2117 { 2118 if (!pfvf->cq_ctx) 2119 seq_puts(filp, "cq context is not initialized\n"); 2120 else 2121 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap, 2122 "cq"); 2123 2124 if (!pfvf->rq_ctx) 2125 seq_puts(filp, "rq context is not initialized\n"); 2126 else 2127 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap, 2128 "rq"); 2129 2130 if (!pfvf->sq_ctx) 2131 seq_puts(filp, "sq context is not initialized\n"); 2132 else 2133 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap, 2134 "sq"); 2135 } 2136 2137 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp, 2138 const char __user *buffer, 2139 size_t count, loff_t *ppos) 2140 { 2141 return rvu_dbg_qsize_write(filp, buffer, count, ppos, 2142 BLKTYPE_NIX); 2143 } 2144 2145 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused) 2146 { 2147 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX); 2148 } 2149 2150 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write); 2151 2152 static void print_band_prof_ctx(struct seq_file *m, 2153 struct nix_bandprof_s *prof) 2154 { 2155 char *str; 2156 2157 switch (prof->pc_mode) { 2158 case NIX_RX_PC_MODE_VLAN: 2159 str = "VLAN"; 2160 break; 2161 case NIX_RX_PC_MODE_DSCP: 2162 str = "DSCP"; 2163 break; 2164 case NIX_RX_PC_MODE_GEN: 2165 str = "Generic"; 2166 break; 2167 case NIX_RX_PC_MODE_RSVD: 2168 str = "Reserved"; 2169 break; 2170 } 2171 seq_printf(m, "W0: pc_mode\t\t%s\n", str); 2172 str = (prof->icolor == 3) ? "Color blind" : 2173 (prof->icolor == 0) ? "Green" : 2174 (prof->icolor == 1) ? "Yellow" : "Red"; 2175 seq_printf(m, "W0: icolor\t\t%s\n", str); 2176 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena); 2177 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent); 2178 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent); 2179 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent); 2180 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent); 2181 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa); 2182 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa); 2183 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa); 2184 2185 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa); 2186 str = (prof->lmode == 0) ? "byte" : "packet"; 2187 seq_printf(m, "W1: lmode\t\t%s\n", str); 2188 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect); 2189 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv); 2190 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent); 2191 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa); 2192 str = (prof->gc_action == 0) ? "PASS" : 2193 (prof->gc_action == 1) ? "DROP" : "RED"; 2194 seq_printf(m, "W1: gc_action\t\t%s\n", str); 2195 str = (prof->yc_action == 0) ? "PASS" : 2196 (prof->yc_action == 1) ? "DROP" : "RED"; 2197 seq_printf(m, "W1: yc_action\t\t%s\n", str); 2198 str = (prof->rc_action == 0) ? "PASS" : 2199 (prof->rc_action == 1) ? "DROP" : "RED"; 2200 seq_printf(m, "W1: rc_action\t\t%s\n", str); 2201 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); 2202 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); 2203 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); 2204 2205 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); 2206 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum); 2207 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum); 2208 seq_printf(m, "W4: green_pkt_pass\t%lld\n", 2209 (u64)prof->green_pkt_pass); 2210 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n", 2211 (u64)prof->yellow_pkt_pass); 2212 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass); 2213 seq_printf(m, "W7: green_octs_pass\t%lld\n", 2214 (u64)prof->green_octs_pass); 2215 seq_printf(m, "W8: yellow_octs_pass\t%lld\n", 2216 (u64)prof->yellow_octs_pass); 2217 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass); 2218 seq_printf(m, "W10: green_pkt_drop\t%lld\n", 2219 (u64)prof->green_pkt_drop); 2220 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n", 2221 (u64)prof->yellow_pkt_drop); 2222 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop); 2223 seq_printf(m, "W13: green_octs_drop\t%lld\n", 2224 (u64)prof->green_octs_drop); 2225 seq_printf(m, "W14: yellow_octs_drop\t%lld\n", 2226 (u64)prof->yellow_octs_drop); 2227 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop); 2228 seq_puts(m, "==============================\n"); 2229 } 2230 2231 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused) 2232 { 2233 struct nix_hw *nix_hw = m->private; 2234 struct nix_cn10k_aq_enq_req aq_req; 2235 struct nix_cn10k_aq_enq_rsp aq_rsp; 2236 struct rvu *rvu = nix_hw->rvu; 2237 struct nix_ipolicer *ipolicer; 2238 int layer, prof_idx, idx, rc; 2239 u16 pcifunc; 2240 char *str; 2241 2242 /* Ingress policers do not exist on all platforms */ 2243 if (!nix_hw->ipolicer) 2244 return 0; 2245 2246 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 2247 if (layer == BAND_PROF_INVAL_LAYER) 2248 continue; 2249 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" : 2250 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top"; 2251 2252 seq_printf(m, "\n%s bandwidth profiles\n", str); 2253 seq_puts(m, "=======================\n"); 2254 2255 ipolicer = &nix_hw->ipolicer[layer]; 2256 2257 for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 2258 if (is_rsrc_free(&ipolicer->band_prof, idx)) 2259 continue; 2260 2261 prof_idx = (idx & 0x3FFF) | (layer << 14); 2262 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 2263 0x00, NIX_AQ_CTYPE_BANDPROF, 2264 prof_idx); 2265 if (rc) { 2266 dev_err(rvu->dev, 2267 "%s: Failed to fetch context of %s profile %d, err %d\n", 2268 __func__, str, idx, rc); 2269 return 0; 2270 } 2271 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx); 2272 pcifunc = ipolicer->pfvf_map[idx]; 2273 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 2274 seq_printf(m, "Allocated to :: PF %d\n", 2275 rvu_get_pf(pcifunc)); 2276 else 2277 seq_printf(m, "Allocated to :: PF %d VF %d\n", 2278 rvu_get_pf(pcifunc), 2279 (pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2280 print_band_prof_ctx(m, &aq_rsp.prof); 2281 } 2282 } 2283 return 0; 2284 } 2285 2286 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL); 2287 2288 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused) 2289 { 2290 struct nix_hw *nix_hw = m->private; 2291 struct nix_ipolicer *ipolicer; 2292 int layer; 2293 char *str; 2294 2295 /* Ingress policers do not exist on all platforms */ 2296 if (!nix_hw->ipolicer) 2297 return 0; 2298 2299 seq_puts(m, "\nBandwidth profile resource free count\n"); 2300 seq_puts(m, "=====================================\n"); 2301 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 2302 if (layer == BAND_PROF_INVAL_LAYER) 2303 continue; 2304 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" : 2305 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top "; 2306 2307 ipolicer = &nix_hw->ipolicer[layer]; 2308 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str, 2309 ipolicer->band_prof.max, 2310 rvu_rsrc_free_count(&ipolicer->band_prof)); 2311 } 2312 seq_puts(m, "=====================================\n"); 2313 2314 return 0; 2315 } 2316 2317 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL); 2318 2319 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) 2320 { 2321 struct nix_hw *nix_hw; 2322 2323 if (!is_block_implemented(rvu->hw, blkaddr)) 2324 return; 2325 2326 if (blkaddr == BLKADDR_NIX0) { 2327 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root); 2328 nix_hw = &rvu->hw->nix[0]; 2329 } else { 2330 rvu->rvu_dbg.nix = debugfs_create_dir("nix1", 2331 rvu->rvu_dbg.root); 2332 nix_hw = &rvu->hw->nix[1]; 2333 } 2334 2335 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2336 &rvu_dbg_nix_sq_ctx_fops); 2337 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2338 &rvu_dbg_nix_rq_ctx_fops); 2339 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2340 &rvu_dbg_nix_cq_ctx_fops); 2341 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, 2342 &rvu_dbg_nix_ndc_tx_cache_fops); 2343 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, 2344 &rvu_dbg_nix_ndc_rx_cache_fops); 2345 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, 2346 &rvu_dbg_nix_ndc_tx_hits_miss_fops); 2347 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, 2348 &rvu_dbg_nix_ndc_rx_hits_miss_fops); 2349 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, 2350 &rvu_dbg_nix_qsize_fops); 2351 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2352 &rvu_dbg_nix_band_prof_ctx_fops); 2353 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, 2354 &rvu_dbg_nix_band_prof_rsrc_fops); 2355 } 2356 2357 static void rvu_dbg_npa_init(struct rvu *rvu) 2358 { 2359 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root); 2360 2361 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu, 2362 &rvu_dbg_npa_qsize_fops); 2363 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu, 2364 &rvu_dbg_npa_aura_ctx_fops); 2365 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, 2366 &rvu_dbg_npa_pool_ctx_fops); 2367 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, 2368 &rvu_dbg_npa_ndc_cache_fops); 2369 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, 2370 &rvu_dbg_npa_ndc_hits_miss_fops); 2371 } 2372 2373 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \ 2374 ({ \ 2375 u64 cnt; \ 2376 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ 2377 NIX_STATS_RX, &(cnt)); \ 2378 if (!err) \ 2379 seq_printf(s, "%s: %llu\n", name, cnt); \ 2380 cnt; \ 2381 }) 2382 2383 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \ 2384 ({ \ 2385 u64 cnt; \ 2386 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ 2387 NIX_STATS_TX, &(cnt)); \ 2388 if (!err) \ 2389 seq_printf(s, "%s: %llu\n", name, cnt); \ 2390 cnt; \ 2391 }) 2392 2393 static int cgx_print_stats(struct seq_file *s, int lmac_id) 2394 { 2395 struct cgx_link_user_info linfo; 2396 struct mac_ops *mac_ops; 2397 void *cgxd = s->private; 2398 u64 ucast, mcast, bcast; 2399 int stat = 0, err = 0; 2400 u64 tx_stat, rx_stat; 2401 struct rvu *rvu; 2402 2403 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, 2404 PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); 2405 if (!rvu) 2406 return -ENODEV; 2407 2408 mac_ops = get_mac_ops(cgxd); 2409 /* There can be no CGX devices at all */ 2410 if (!mac_ops) 2411 return 0; 2412 2413 /* Link status */ 2414 seq_puts(s, "\n=======Link Status======\n\n"); 2415 err = cgx_get_link_info(cgxd, lmac_id, &linfo); 2416 if (err) 2417 seq_puts(s, "Failed to read link status\n"); 2418 seq_printf(s, "\nLink is %s %d Mbps\n\n", 2419 linfo.link_up ? "UP" : "DOWN", linfo.speed); 2420 2421 /* Rx stats */ 2422 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n", 2423 mac_ops->name); 2424 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames"); 2425 if (err) 2426 return err; 2427 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames"); 2428 if (err) 2429 return err; 2430 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames"); 2431 if (err) 2432 return err; 2433 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast); 2434 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes"); 2435 if (err) 2436 return err; 2437 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops"); 2438 if (err) 2439 return err; 2440 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors"); 2441 if (err) 2442 return err; 2443 2444 /* Tx stats */ 2445 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n", 2446 mac_ops->name); 2447 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames"); 2448 if (err) 2449 return err; 2450 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames"); 2451 if (err) 2452 return err; 2453 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames"); 2454 if (err) 2455 return err; 2456 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast); 2457 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes"); 2458 if (err) 2459 return err; 2460 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops"); 2461 if (err) 2462 return err; 2463 2464 /* Rx stats */ 2465 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name); 2466 while (stat < mac_ops->rx_stats_cnt) { 2467 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat); 2468 if (err) 2469 return err; 2470 if (is_rvu_otx2(rvu)) 2471 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], 2472 rx_stat); 2473 else 2474 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat], 2475 rx_stat); 2476 stat++; 2477 } 2478 2479 /* Tx stats */ 2480 stat = 0; 2481 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name); 2482 while (stat < mac_ops->tx_stats_cnt) { 2483 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat); 2484 if (err) 2485 return err; 2486 2487 if (is_rvu_otx2(rvu)) 2488 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], 2489 tx_stat); 2490 else 2491 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat], 2492 tx_stat); 2493 stat++; 2494 } 2495 2496 return err; 2497 } 2498 2499 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) 2500 { 2501 struct dentry *current_dir; 2502 char *buf; 2503 2504 current_dir = filp->file->f_path.dentry->d_parent; 2505 buf = strrchr(current_dir->d_name.name, 'c'); 2506 if (!buf) 2507 return -EINVAL; 2508 2509 return kstrtoint(buf + 1, 10, lmac_id); 2510 } 2511 2512 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) 2513 { 2514 int lmac_id, err; 2515 2516 err = rvu_dbg_derive_lmacid(filp, &lmac_id); 2517 if (!err) 2518 return cgx_print_stats(filp, lmac_id); 2519 2520 return err; 2521 } 2522 2523 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); 2524 2525 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) 2526 { 2527 struct pci_dev *pdev = NULL; 2528 void *cgxd = s->private; 2529 char *bcast, *mcast; 2530 u16 index, domain; 2531 u8 dmac[ETH_ALEN]; 2532 struct rvu *rvu; 2533 u64 cfg, mac; 2534 int pf; 2535 2536 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, 2537 PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); 2538 if (!rvu) 2539 return -ENODEV; 2540 2541 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); 2542 domain = 2; 2543 2544 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); 2545 if (!pdev) 2546 return 0; 2547 2548 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id); 2549 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT"; 2550 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT"; 2551 2552 seq_puts(s, 2553 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n"); 2554 seq_printf(s, "%s PF%d %9s %9s", 2555 dev_name(&pdev->dev), pf, bcast, mcast); 2556 if (cfg & CGX_DMAC_CAM_ACCEPT) 2557 seq_printf(s, "%12s\n\n", "UNICAST"); 2558 else 2559 seq_printf(s, "%16s\n\n", "PROMISCUOUS"); 2560 2561 seq_puts(s, "\nDMAC-INDEX ADDRESS\n"); 2562 2563 for (index = 0 ; index < 32 ; index++) { 2564 cfg = cgx_read_dmac_entry(cgxd, index); 2565 /* Display enabled dmac entries associated with current lmac */ 2566 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) && 2567 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) { 2568 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg); 2569 u64_to_ether_addr(mac, dmac); 2570 seq_printf(s, "%7d %pM\n", index, dmac); 2571 } 2572 } 2573 2574 pci_dev_put(pdev); 2575 return 0; 2576 } 2577 2578 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) 2579 { 2580 int err, lmac_id; 2581 2582 err = rvu_dbg_derive_lmacid(filp, &lmac_id); 2583 if (!err) 2584 return cgx_print_dmac_flt(filp, lmac_id); 2585 2586 return err; 2587 } 2588 2589 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); 2590 2591 static void rvu_dbg_cgx_init(struct rvu *rvu) 2592 { 2593 struct mac_ops *mac_ops; 2594 unsigned long lmac_bmap; 2595 int i, lmac_id; 2596 char dname[20]; 2597 void *cgx; 2598 2599 if (!cgx_get_cgxcnt_max()) 2600 return; 2601 2602 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); 2603 if (!mac_ops) 2604 return; 2605 2606 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name, 2607 rvu->rvu_dbg.root); 2608 2609 for (i = 0; i < cgx_get_cgxcnt_max(); i++) { 2610 cgx = rvu_cgx_pdata(i, rvu); 2611 if (!cgx) 2612 continue; 2613 lmac_bmap = cgx_get_lmac_bmap(cgx); 2614 /* cgx debugfs dir */ 2615 sprintf(dname, "%s%d", mac_ops->name, i); 2616 rvu->rvu_dbg.cgx = debugfs_create_dir(dname, 2617 rvu->rvu_dbg.cgx_root); 2618 2619 for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) { 2620 /* lmac debugfs dir */ 2621 sprintf(dname, "lmac%d", lmac_id); 2622 rvu->rvu_dbg.lmac = 2623 debugfs_create_dir(dname, rvu->rvu_dbg.cgx); 2624 2625 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, 2626 cgx, &rvu_dbg_cgx_stat_fops); 2627 debugfs_create_file("mac_filter", 0600, 2628 rvu->rvu_dbg.lmac, cgx, 2629 &rvu_dbg_cgx_dmac_flt_fops); 2630 } 2631 } 2632 } 2633 2634 /* NPC debugfs APIs */ 2635 static void rvu_print_npc_mcam_info(struct seq_file *s, 2636 u16 pcifunc, int blkaddr) 2637 { 2638 struct rvu *rvu = s->private; 2639 int entry_acnt, entry_ecnt; 2640 int cntr_acnt, cntr_ecnt; 2641 2642 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, 2643 &entry_acnt, &entry_ecnt); 2644 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, 2645 &cntr_acnt, &cntr_ecnt); 2646 if (!entry_acnt && !cntr_acnt) 2647 return; 2648 2649 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 2650 seq_printf(s, "\n\t\t Device \t\t: PF%d\n", 2651 rvu_get_pf(pcifunc)); 2652 else 2653 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", 2654 rvu_get_pf(pcifunc), 2655 (pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2656 2657 if (entry_acnt) { 2658 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt); 2659 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt); 2660 } 2661 if (cntr_acnt) { 2662 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt); 2663 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt); 2664 } 2665 } 2666 2667 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) 2668 { 2669 struct rvu *rvu = filp->private; 2670 int pf, vf, numvfs, blkaddr; 2671 struct npc_mcam *mcam; 2672 u16 pcifunc, counters; 2673 u64 cfg; 2674 2675 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2676 if (blkaddr < 0) 2677 return -ENODEV; 2678 2679 mcam = &rvu->hw->mcam; 2680 counters = rvu->hw->npc_counters; 2681 2682 seq_puts(filp, "\nNPC MCAM info:\n"); 2683 /* MCAM keywidth on receive and transmit sides */ 2684 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); 2685 cfg = (cfg >> 32) & 0x07; 2686 seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? 2687 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? 2688 "224bits" : "448bits")); 2689 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX)); 2690 cfg = (cfg >> 32) & 0x07; 2691 seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? 2692 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? 2693 "224bits" : "448bits")); 2694 2695 mutex_lock(&mcam->lock); 2696 /* MCAM entries */ 2697 seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries); 2698 seq_printf(filp, "\t\t Reserved \t: %d\n", 2699 mcam->total_entries - mcam->bmap_entries); 2700 seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt); 2701 2702 /* MCAM counters */ 2703 seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters); 2704 seq_printf(filp, "\t\t Reserved \t: %d\n", 2705 counters - mcam->counters.max); 2706 seq_printf(filp, "\t\t Available \t: %d\n", 2707 rvu_rsrc_free_count(&mcam->counters)); 2708 2709 if (mcam->bmap_entries == mcam->bmap_fcnt) { 2710 mutex_unlock(&mcam->lock); 2711 return 0; 2712 } 2713 2714 seq_puts(filp, "\n\t\t Current allocation\n"); 2715 seq_puts(filp, "\t\t====================\n"); 2716 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2717 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2718 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); 2719 2720 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2721 numvfs = (cfg >> 12) & 0xFF; 2722 for (vf = 0; vf < numvfs; vf++) { 2723 pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); 2724 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); 2725 } 2726 } 2727 2728 mutex_unlock(&mcam->lock); 2729 return 0; 2730 } 2731 2732 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL); 2733 2734 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, 2735 void *unused) 2736 { 2737 struct rvu *rvu = filp->private; 2738 struct npc_mcam *mcam; 2739 int blkaddr; 2740 2741 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2742 if (blkaddr < 0) 2743 return -ENODEV; 2744 2745 mcam = &rvu->hw->mcam; 2746 2747 seq_puts(filp, "\nNPC MCAM RX miss action stats\n"); 2748 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr, 2749 rvu_read64(rvu, blkaddr, 2750 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr))); 2751 2752 return 0; 2753 } 2754 2755 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); 2756 2757 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, 2758 struct rvu_npc_mcam_rule *rule) 2759 { 2760 u8 bit; 2761 2762 for_each_set_bit(bit, (unsigned long *)&rule->features, 64) { 2763 seq_printf(s, "\t%s ", npc_get_field_name(bit)); 2764 switch (bit) { 2765 case NPC_LXMB: 2766 if (rule->lxmb == 1) 2767 seq_puts(s, "\tL2M nibble is set\n"); 2768 else 2769 seq_puts(s, "\tL2B nibble is set\n"); 2770 break; 2771 case NPC_DMAC: 2772 seq_printf(s, "%pM ", rule->packet.dmac); 2773 seq_printf(s, "mask %pM\n", rule->mask.dmac); 2774 break; 2775 case NPC_SMAC: 2776 seq_printf(s, "%pM ", rule->packet.smac); 2777 seq_printf(s, "mask %pM\n", rule->mask.smac); 2778 break; 2779 case NPC_ETYPE: 2780 seq_printf(s, "0x%x ", ntohs(rule->packet.etype)); 2781 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype)); 2782 break; 2783 case NPC_OUTER_VID: 2784 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci)); 2785 seq_printf(s, "mask 0x%x\n", 2786 ntohs(rule->mask.vlan_tci)); 2787 break; 2788 case NPC_INNER_VID: 2789 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci)); 2790 seq_printf(s, "mask 0x%x\n", 2791 ntohs(rule->mask.vlan_itci)); 2792 break; 2793 case NPC_TOS: 2794 seq_printf(s, "%d ", rule->packet.tos); 2795 seq_printf(s, "mask 0x%x\n", rule->mask.tos); 2796 break; 2797 case NPC_SIP_IPV4: 2798 seq_printf(s, "%pI4 ", &rule->packet.ip4src); 2799 seq_printf(s, "mask %pI4\n", &rule->mask.ip4src); 2800 break; 2801 case NPC_DIP_IPV4: 2802 seq_printf(s, "%pI4 ", &rule->packet.ip4dst); 2803 seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst); 2804 break; 2805 case NPC_SIP_IPV6: 2806 seq_printf(s, "%pI6 ", rule->packet.ip6src); 2807 seq_printf(s, "mask %pI6\n", rule->mask.ip6src); 2808 break; 2809 case NPC_DIP_IPV6: 2810 seq_printf(s, "%pI6 ", rule->packet.ip6dst); 2811 seq_printf(s, "mask %pI6\n", rule->mask.ip6dst); 2812 break; 2813 case NPC_IPFRAG_IPV6: 2814 seq_printf(s, "0x%x ", rule->packet.next_header); 2815 seq_printf(s, "mask 0x%x\n", rule->mask.next_header); 2816 break; 2817 case NPC_IPFRAG_IPV4: 2818 seq_printf(s, "0x%x ", rule->packet.ip_flag); 2819 seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag); 2820 break; 2821 case NPC_SPORT_TCP: 2822 case NPC_SPORT_UDP: 2823 case NPC_SPORT_SCTP: 2824 seq_printf(s, "%d ", ntohs(rule->packet.sport)); 2825 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport)); 2826 break; 2827 case NPC_DPORT_TCP: 2828 case NPC_DPORT_UDP: 2829 case NPC_DPORT_SCTP: 2830 seq_printf(s, "%d ", ntohs(rule->packet.dport)); 2831 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport)); 2832 break; 2833 case NPC_IPSEC_SPI: 2834 seq_printf(s, "0x%x ", ntohl(rule->packet.spi)); 2835 seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi)); 2836 break; 2837 default: 2838 seq_puts(s, "\n"); 2839 break; 2840 } 2841 } 2842 } 2843 2844 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, 2845 struct rvu_npc_mcam_rule *rule) 2846 { 2847 if (is_npc_intf_tx(rule->intf)) { 2848 switch (rule->tx_action.op) { 2849 case NIX_TX_ACTIONOP_DROP: 2850 seq_puts(s, "\taction: Drop\n"); 2851 break; 2852 case NIX_TX_ACTIONOP_UCAST_DEFAULT: 2853 seq_puts(s, "\taction: Unicast to default channel\n"); 2854 break; 2855 case NIX_TX_ACTIONOP_UCAST_CHAN: 2856 seq_printf(s, "\taction: Unicast to channel %d\n", 2857 rule->tx_action.index); 2858 break; 2859 case NIX_TX_ACTIONOP_MCAST: 2860 seq_puts(s, "\taction: Multicast\n"); 2861 break; 2862 case NIX_TX_ACTIONOP_DROP_VIOL: 2863 seq_puts(s, "\taction: Lockdown Violation Drop\n"); 2864 break; 2865 default: 2866 break; 2867 } 2868 } else { 2869 switch (rule->rx_action.op) { 2870 case NIX_RX_ACTIONOP_DROP: 2871 seq_puts(s, "\taction: Drop\n"); 2872 break; 2873 case NIX_RX_ACTIONOP_UCAST: 2874 seq_printf(s, "\taction: Direct to queue %d\n", 2875 rule->rx_action.index); 2876 break; 2877 case NIX_RX_ACTIONOP_RSS: 2878 seq_puts(s, "\taction: RSS\n"); 2879 break; 2880 case NIX_RX_ACTIONOP_UCAST_IPSEC: 2881 seq_puts(s, "\taction: Unicast ipsec\n"); 2882 break; 2883 case NIX_RX_ACTIONOP_MCAST: 2884 seq_puts(s, "\taction: Multicast\n"); 2885 break; 2886 default: 2887 break; 2888 } 2889 } 2890 } 2891 2892 static const char *rvu_dbg_get_intf_name(int intf) 2893 { 2894 switch (intf) { 2895 case NIX_INTFX_RX(0): 2896 return "NIX0_RX"; 2897 case NIX_INTFX_RX(1): 2898 return "NIX1_RX"; 2899 case NIX_INTFX_TX(0): 2900 return "NIX0_TX"; 2901 case NIX_INTFX_TX(1): 2902 return "NIX1_TX"; 2903 default: 2904 break; 2905 } 2906 2907 return "unknown"; 2908 } 2909 2910 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) 2911 { 2912 struct rvu_npc_mcam_rule *iter; 2913 struct rvu *rvu = s->private; 2914 struct npc_mcam *mcam; 2915 int pf, vf = -1; 2916 bool enabled; 2917 int blkaddr; 2918 u16 target; 2919 u64 hits; 2920 2921 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2922 if (blkaddr < 0) 2923 return 0; 2924 2925 mcam = &rvu->hw->mcam; 2926 2927 mutex_lock(&mcam->lock); 2928 list_for_each_entry(iter, &mcam->mcam_rules, list) { 2929 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 2930 seq_printf(s, "\n\tInstalled by: PF%d ", pf); 2931 2932 if (iter->owner & RVU_PFVF_FUNC_MASK) { 2933 vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1; 2934 seq_printf(s, "VF%d", vf); 2935 } 2936 seq_puts(s, "\n"); 2937 2938 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ? 2939 "RX" : "TX"); 2940 seq_printf(s, "\tinterface: %s\n", 2941 rvu_dbg_get_intf_name(iter->intf)); 2942 seq_printf(s, "\tmcam entry: %d\n", iter->entry); 2943 2944 rvu_dbg_npc_mcam_show_flows(s, iter); 2945 if (is_npc_intf_rx(iter->intf)) { 2946 target = iter->rx_action.pf_func; 2947 pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 2948 seq_printf(s, "\tForward to: PF%d ", pf); 2949 2950 if (target & RVU_PFVF_FUNC_MASK) { 2951 vf = (target & RVU_PFVF_FUNC_MASK) - 1; 2952 seq_printf(s, "VF%d", vf); 2953 } 2954 seq_puts(s, "\n"); 2955 seq_printf(s, "\tchannel: 0x%x\n", iter->chan); 2956 seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask); 2957 } 2958 2959 rvu_dbg_npc_mcam_show_action(s, iter); 2960 2961 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry); 2962 seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no"); 2963 2964 if (!iter->has_cntr) 2965 continue; 2966 seq_printf(s, "\tcounter: %d\n", iter->cntr); 2967 2968 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr)); 2969 seq_printf(s, "\thits: %lld\n", hits); 2970 } 2971 mutex_unlock(&mcam->lock); 2972 2973 return 0; 2974 } 2975 2976 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL); 2977 2978 static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused) 2979 { 2980 struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 }; 2981 struct npc_exact_table_entry *cam_entry; 2982 struct npc_exact_table *table; 2983 struct rvu *rvu = s->private; 2984 int i, j; 2985 2986 u8 bitmap = 0; 2987 2988 table = rvu->hw->table; 2989 2990 mutex_lock(&table->lock); 2991 2992 /* Check if there is at least one entry in mem table */ 2993 if (!table->mem_tbl_entry_cnt) 2994 goto dump_cam_table; 2995 2996 /* Print table headers */ 2997 seq_puts(s, "\n\tExact Match MEM Table\n"); 2998 seq_puts(s, "Index\t"); 2999 3000 for (i = 0; i < table->mem_table.ways; i++) { 3001 mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i], 3002 struct npc_exact_table_entry, list); 3003 3004 seq_printf(s, "Way-%d\t\t\t\t\t", i); 3005 } 3006 3007 seq_puts(s, "\n"); 3008 for (i = 0; i < table->mem_table.ways; i++) 3009 seq_puts(s, "\tChan MAC \t"); 3010 3011 seq_puts(s, "\n\n"); 3012 3013 /* Print mem table entries */ 3014 for (i = 0; i < table->mem_table.depth; i++) { 3015 bitmap = 0; 3016 for (j = 0; j < table->mem_table.ways; j++) { 3017 if (!mem_entry[j]) 3018 continue; 3019 3020 if (mem_entry[j]->index != i) 3021 continue; 3022 3023 bitmap |= BIT(j); 3024 } 3025 3026 /* No valid entries */ 3027 if (!bitmap) 3028 continue; 3029 3030 seq_printf(s, "%d\t", i); 3031 for (j = 0; j < table->mem_table.ways; j++) { 3032 if (!(bitmap & BIT(j))) { 3033 seq_puts(s, "nil\t\t\t\t\t"); 3034 continue; 3035 } 3036 3037 seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan, 3038 mem_entry[j]->mac); 3039 mem_entry[j] = list_next_entry(mem_entry[j], list); 3040 } 3041 seq_puts(s, "\n"); 3042 } 3043 3044 dump_cam_table: 3045 3046 if (!table->cam_tbl_entry_cnt) 3047 goto done; 3048 3049 seq_puts(s, "\n\tExact Match CAM Table\n"); 3050 seq_puts(s, "index\tchan\tMAC\n"); 3051 3052 /* Traverse cam table entries */ 3053 list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) { 3054 seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan, 3055 cam_entry->mac); 3056 } 3057 3058 done: 3059 mutex_unlock(&table->lock); 3060 return 0; 3061 } 3062 3063 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL); 3064 3065 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused) 3066 { 3067 struct npc_exact_table *table; 3068 struct rvu *rvu = s->private; 3069 int i; 3070 3071 table = rvu->hw->table; 3072 3073 seq_puts(s, "\n\tExact Table Info\n"); 3074 seq_printf(s, "Exact Match Feature : %s\n", 3075 rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable"); 3076 if (!rvu->hw->cap.npc_exact_match_enabled) 3077 return 0; 3078 3079 seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n"); 3080 for (i = 0; i < table->num_drop_rules; i++) 3081 seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]); 3082 3083 seq_puts(s, "\nMcam Index\tPromisc Mode Status\n"); 3084 for (i = 0; i < table->num_drop_rules; i++) 3085 seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off"); 3086 3087 seq_puts(s, "\n\tMEM Table Info\n"); 3088 seq_printf(s, "Ways : %d\n", table->mem_table.ways); 3089 seq_printf(s, "Depth : %d\n", table->mem_table.depth); 3090 seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask); 3091 seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask); 3092 seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset); 3093 3094 seq_puts(s, "\n\tCAM Table Info\n"); 3095 seq_printf(s, "Depth : %d\n", table->cam_table.depth); 3096 3097 return 0; 3098 } 3099 3100 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL); 3101 3102 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused) 3103 { 3104 struct npc_exact_table *table; 3105 struct rvu *rvu = s->private; 3106 struct npc_key_field *field; 3107 u16 chan, pcifunc; 3108 int blkaddr, i; 3109 u64 cfg, cam1; 3110 char *str; 3111 3112 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3113 table = rvu->hw->table; 3114 3115 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN]; 3116 3117 seq_puts(s, "\n\t Exact Hit on drop status\n"); 3118 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n"); 3119 3120 for (i = 0; i < table->num_drop_rules; i++) { 3121 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i); 3122 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0)); 3123 3124 /* channel will be always in keyword 0 */ 3125 cam1 = rvu_read64(rvu, blkaddr, 3126 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1)); 3127 chan = field->kw_mask[0] & cam1; 3128 3129 str = (cfg & 1) ? "enabled" : "disabled"; 3130 3131 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i, 3132 rvu_read64(rvu, blkaddr, 3133 NPC_AF_MATCH_STATX(table->counter_idx[i])), 3134 chan, str); 3135 } 3136 3137 return 0; 3138 } 3139 3140 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL); 3141 3142 static void rvu_dbg_npc_init(struct rvu *rvu) 3143 { 3144 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); 3145 3146 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu, 3147 &rvu_dbg_npc_mcam_info_fops); 3148 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu, 3149 &rvu_dbg_npc_mcam_rules_fops); 3150 3151 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu, 3152 &rvu_dbg_npc_rx_miss_act_fops); 3153 3154 if (!rvu->hw->cap.npc_exact_match_enabled) 3155 return; 3156 3157 debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu, 3158 &rvu_dbg_npc_exact_entries_fops); 3159 3160 debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu, 3161 &rvu_dbg_npc_exact_info_fops); 3162 3163 debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu, 3164 &rvu_dbg_npc_exact_drop_cnt_fops); 3165 3166 } 3167 3168 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type) 3169 { 3170 struct cpt_ctx *ctx = filp->private; 3171 u64 busy_sts = 0, free_sts = 0; 3172 u32 e_min = 0, e_max = 0, e, i; 3173 u16 max_ses, max_ies, max_aes; 3174 struct rvu *rvu = ctx->rvu; 3175 int blkaddr = ctx->blkaddr; 3176 u64 reg; 3177 3178 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); 3179 max_ses = reg & 0xffff; 3180 max_ies = (reg >> 16) & 0xffff; 3181 max_aes = (reg >> 32) & 0xffff; 3182 3183 switch (eng_type) { 3184 case CPT_AE_TYPE: 3185 e_min = max_ses + max_ies; 3186 e_max = max_ses + max_ies + max_aes; 3187 break; 3188 case CPT_SE_TYPE: 3189 e_min = 0; 3190 e_max = max_ses; 3191 break; 3192 case CPT_IE_TYPE: 3193 e_min = max_ses; 3194 e_max = max_ses + max_ies; 3195 break; 3196 default: 3197 return -EINVAL; 3198 } 3199 3200 for (e = e_min, i = 0; e < e_max; e++, i++) { 3201 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); 3202 if (reg & 0x1) 3203 busy_sts |= 1ULL << i; 3204 3205 if (reg & 0x2) 3206 free_sts |= 1ULL << i; 3207 } 3208 seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts); 3209 seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts); 3210 3211 return 0; 3212 } 3213 3214 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused) 3215 { 3216 return cpt_eng_sts_display(filp, CPT_AE_TYPE); 3217 } 3218 3219 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL); 3220 3221 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused) 3222 { 3223 return cpt_eng_sts_display(filp, CPT_SE_TYPE); 3224 } 3225 3226 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL); 3227 3228 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused) 3229 { 3230 return cpt_eng_sts_display(filp, CPT_IE_TYPE); 3231 } 3232 3233 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL); 3234 3235 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused) 3236 { 3237 struct cpt_ctx *ctx = filp->private; 3238 u16 max_ses, max_ies, max_aes; 3239 struct rvu *rvu = ctx->rvu; 3240 int blkaddr = ctx->blkaddr; 3241 u32 e_max, e; 3242 u64 reg; 3243 3244 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); 3245 max_ses = reg & 0xffff; 3246 max_ies = (reg >> 16) & 0xffff; 3247 max_aes = (reg >> 32) & 0xffff; 3248 3249 e_max = max_ses + max_ies + max_aes; 3250 3251 seq_puts(filp, "===========================================\n"); 3252 for (e = 0; e < e_max; e++) { 3253 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e)); 3254 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e, 3255 reg & 0xff); 3256 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e)); 3257 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e, 3258 reg); 3259 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e)); 3260 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e, 3261 reg); 3262 seq_puts(filp, "===========================================\n"); 3263 } 3264 return 0; 3265 } 3266 3267 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL); 3268 3269 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused) 3270 { 3271 struct cpt_ctx *ctx = filp->private; 3272 int blkaddr = ctx->blkaddr; 3273 struct rvu *rvu = ctx->rvu; 3274 struct rvu_block *block; 3275 struct rvu_hwinfo *hw; 3276 u64 reg; 3277 u32 lf; 3278 3279 hw = rvu->hw; 3280 block = &hw->block[blkaddr]; 3281 if (!block->lf.bmap) 3282 return -ENODEV; 3283 3284 seq_puts(filp, "===========================================\n"); 3285 for (lf = 0; lf < block->lf.max; lf++) { 3286 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf)); 3287 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg); 3288 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf)); 3289 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg); 3290 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf)); 3291 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg); 3292 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg | 3293 (lf << block->lfshift)); 3294 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg); 3295 seq_puts(filp, "===========================================\n"); 3296 } 3297 return 0; 3298 } 3299 3300 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL); 3301 3302 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused) 3303 { 3304 struct cpt_ctx *ctx = filp->private; 3305 struct rvu *rvu = ctx->rvu; 3306 int blkaddr = ctx->blkaddr; 3307 u64 reg0, reg1; 3308 3309 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); 3310 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); 3311 seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1); 3312 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0)); 3313 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1)); 3314 seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1); 3315 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0)); 3316 seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0); 3317 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT); 3318 seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0); 3319 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT); 3320 seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0); 3321 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO); 3322 seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0); 3323 3324 return 0; 3325 } 3326 3327 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL); 3328 3329 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused) 3330 { 3331 struct cpt_ctx *ctx = filp->private; 3332 struct rvu *rvu = ctx->rvu; 3333 int blkaddr = ctx->blkaddr; 3334 u64 reg; 3335 3336 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC); 3337 seq_printf(filp, "CPT instruction requests %llu\n", reg); 3338 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC); 3339 seq_printf(filp, "CPT instruction latency %llu\n", reg); 3340 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC); 3341 seq_printf(filp, "CPT NCB read requests %llu\n", reg); 3342 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC); 3343 seq_printf(filp, "CPT NCB read latency %llu\n", reg); 3344 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC); 3345 seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg); 3346 reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC); 3347 seq_printf(filp, "CPT active cycles pc %llu\n", reg); 3348 reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT); 3349 seq_printf(filp, "CPT clock count pc %llu\n", reg); 3350 3351 return 0; 3352 } 3353 3354 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL); 3355 3356 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr) 3357 { 3358 struct cpt_ctx *ctx; 3359 3360 if (!is_block_implemented(rvu->hw, blkaddr)) 3361 return; 3362 3363 if (blkaddr == BLKADDR_CPT0) { 3364 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root); 3365 ctx = &rvu->rvu_dbg.cpt_ctx[0]; 3366 ctx->blkaddr = BLKADDR_CPT0; 3367 ctx->rvu = rvu; 3368 } else { 3369 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1", 3370 rvu->rvu_dbg.root); 3371 ctx = &rvu->rvu_dbg.cpt_ctx[1]; 3372 ctx->blkaddr = BLKADDR_CPT1; 3373 ctx->rvu = rvu; 3374 } 3375 3376 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx, 3377 &rvu_dbg_cpt_pc_fops); 3378 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx, 3379 &rvu_dbg_cpt_ae_sts_fops); 3380 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx, 3381 &rvu_dbg_cpt_se_sts_fops); 3382 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx, 3383 &rvu_dbg_cpt_ie_sts_fops); 3384 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx, 3385 &rvu_dbg_cpt_engines_info_fops); 3386 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx, 3387 &rvu_dbg_cpt_lfs_info_fops); 3388 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx, 3389 &rvu_dbg_cpt_err_info_fops); 3390 } 3391 3392 static const char *rvu_get_dbg_dir_name(struct rvu *rvu) 3393 { 3394 if (!is_rvu_otx2(rvu)) 3395 return "cn10k"; 3396 else 3397 return "octeontx2"; 3398 } 3399 3400 void rvu_dbg_init(struct rvu *rvu) 3401 { 3402 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL); 3403 3404 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, 3405 &rvu_dbg_rsrc_status_fops); 3406 3407 if (!is_rvu_otx2(rvu)) 3408 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root, 3409 rvu, &rvu_dbg_lmtst_map_table_fops); 3410 3411 if (!cgx_get_cgxcnt_max()) 3412 goto create; 3413 3414 if (is_rvu_otx2(rvu)) 3415 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, 3416 rvu, &rvu_dbg_rvu_pf_cgx_map_fops); 3417 else 3418 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root, 3419 rvu, &rvu_dbg_rvu_pf_cgx_map_fops); 3420 3421 create: 3422 rvu_dbg_npa_init(rvu); 3423 rvu_dbg_nix_init(rvu, BLKADDR_NIX0); 3424 3425 rvu_dbg_nix_init(rvu, BLKADDR_NIX1); 3426 rvu_dbg_cgx_init(rvu); 3427 rvu_dbg_npc_init(rvu); 3428 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0); 3429 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1); 3430 rvu_dbg_mcs_init(rvu); 3431 } 3432 3433 void rvu_dbg_exit(struct rvu *rvu) 3434 { 3435 debugfs_remove_recursive(rvu->rvu_dbg.root); 3436 } 3437 3438 #endif /* CONFIG_DEBUG_FS */ 3439