// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2019 Marvell.
 *
 */

#ifdef CONFIG_DEBUG_FS

#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "npc.h"
#include "rvu_npc_hash.h"
#include "mcs.h"

#define DEBUGFS_DIR_NAME "octeontx2"

enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST = 0x0,
	TX_BCAST = 0x1,
	TX_MCAST = 0x2,
	TX_DROP = 0x3,
	TX_OCTS = 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS = 0x0,
	RX_UCAST = 0x1,
	RX_BCAST = 0x2,
	RX_MCAST = 0x3,
	RX_DROP = 0x4,
	RX_DROP_OCTS = 0x5,
	RX_FCS = 0x6,
	RX_ERR = 0x7,
	RX_DRP_BCAST = 0x8,
	RX_DRP_MCAST = 0x9,
	RX_DRP_L3BCAST = 0xa,
	RX_DRP_L3MCAST = 0xb,
	RX_STATS_ENUM_LAST,
};

static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};

static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Packets truncated due to transmit underflow",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with Frame Check Sequence Errors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};

static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface",
};

enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
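/* The two helper macros below stamp out the file_operations for every
 * debugfs entry in this file.  RVU_DEBUG_SEQ_FOPS() wires a seq_file
 * based reader (plus an optional write handler), while RVU_DEBUG_FOPS()
 * is used for raw read/write handlers.  For example,
 * RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write)
 * generates rvu_dbg_open_npa_qsize() and rvu_dbg_npa_qsize_fops,
 * pairing rvu_dbg_npa_qsize_display() with rvu_dbg_npa_qsize_write().
 */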
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);

static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_port_stats stats;
	int lmac;

	seq_puts(filp, "\n port stats\n");
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
		mcs_get_port_stats(mcs, &stats, lmac, dir);
		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);

		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
				   stats.preempt_err_cnt);
		if (dir == MCS_TX)
			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
				   stats.sectag_insert_err_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);

static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_sa_stats stats;
	struct rsrc_bmap *map;
	int sa_id;

	if (dir == MCS_TX) {
		map = &mcs->tx.sa;
		mutex_lock(&mcs->stats_lock);
		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
			seq_puts(filp, "\n TX SA stats\n");
			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
				   stats.pkt_encrypt_cnt);

			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
				   stats.pkt_protected_cnt);
		}
		mutex_unlock(&mcs->stats_lock);
		return 0;
	}

	/* RX stats */
	map = &mcs->rx.sa;
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
		seq_puts(filp, "\n RX SA stats\n");
		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);

static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);

static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->tx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);

		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
				   stats.octet_encrypt_cnt);
			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
				   stats.octet_protected_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);

static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_flowid_stats stats;
	struct rsrc_bmap *map;
	int flow_id;

	seq_puts(filp, "\n Flowid stats\n");

	if (dir == MCS_RX)
		map = &mcs->rx.flow_ids;
	else
		map = &mcs->tx.flow_ids;

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);

static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);

static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octets encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: Octets protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		seq_printf(filp, "secy%d: Pkts with no active sa: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);

static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octets decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disabled port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with bad tag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no sa: %lld\n", secy_id, stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untagged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
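/* rvu_dbg_mcs_init() below builds, per MCS block, a debugfs tree of the
 * form (relative to this driver's debugfs root):
 *
 *   mcs/mcs<N>/rx_stats/{flowid,secy,sc,sa,port}
 *   mcs/mcs<N>/tx_stats/{flowid,secy,sc,sa,port}
 *
 * Each file dumps the corresponding RX/TX statistics through the
 * seq_file handlers defined above.
 */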
static void rvu_dbg_mcs_init(struct rvu *rvu)
{
	struct mcs *mcs;
	char dname[10];
	int i;

	if (!rvu->mcs_blk_cnt)
		return;

	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);

	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
		mcs = mcs_get_pdata(i);

		sprintf(dname, "mcs%d", i);
		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
						      rvu->rvu_dbg.mcs_root);

		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_port_stats_fops);

		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_port_stats_fops);
	}
}
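/* LMTST map table layout, as dumped below: one LMT_MAPTBL_ENTRY_SIZE
 * (16 byte) entry per PF/VF slot.  A PF's entry lives at
 * (pf * total_vfs * 16) and its VFs follow at consecutive 16-byte
 * offsets.  Word 0 of an entry holds the LMT line base address and
 * word 1 holds the map entry attributes.
 */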
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
					       char __user *buffer,
					       size_t count, loff_t *ppos)
{
	struct rvu *rvu = filp->private_data;
	u64 lmt_addr, val, tbl_base;
	int pf, vf, num_vfs, hw_vfs;
	void __iomem *lmt_map_base;
	int buf_size = 10240;
	size_t off = 0;
	int index = 0;
	char *buf;
	int ret;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		kfree(buf);
		return -ENOMEM;
	}

	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "\n\t\t\t\t\tLmtst Map Table Entries");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "\n\t\t\t\t\t=======================");
	off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "Lmtline Base (word 0)\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "Lmt Map Entry (word 1)");
	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
				 pf);

		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
				 (tbl_base + index));
		lmt_addr = readq(lmt_map_base + index);
		off += scnprintf(&buf[off], buf_size - 1 - off,
				 " 0x%016llx\t\t", lmt_addr);
		index += 8;
		val = readq(lmt_map_base + index);
		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
				 val);
		/* Reading num of VFs per PF */
		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
		for (vf = 0; vf < num_vfs; vf++) {
			index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
				((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "PF%d:VF%d \t\t", pf, vf);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%llx\t\t", (tbl_base + index));
			lmt_addr = readq(lmt_map_base + index);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%016llx\t\t", lmt_addr);
			index += 8;
			val = readq(lmt_map_base + index);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%016llx\n", val);
		}
	}
	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");

	ret = min(off, count);
	if (copy_to_user(buffer, buf, ret))
		ret = -EFAULT;
	kfree(buf);

	iounmap(lmt_map_base);
	if (ret < 0)
		return ret;

	*ppos = ret;
	return ret;
}

RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
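/* get_lf_str_list() below condenses the LFs attached to a PF/VF into a
 * human-readable string, folding consecutive LFs into ranges: e.g. LFs
 * 0, 1, 2, 3 and 6 are rendered as "0-3,6".
 */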
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}

static int get_max_column_width(struct rvu *rvu)
{
	int index, pf, vf, lf_str_size = 12, buf_size = 256;
	struct rvu_block block;
	u16 pcifunc;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;

				get_lf_str_list(block, pcifunc, buf);
				if (lf_str_size <= strlen(buf))
					lf_str_size = strlen(buf) + 1;
			}
		}
	}

	kfree(buf);
	return lf_str_size;
}

/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0, i = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			 "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
	if (bytes_not_copied)
		goto out;

	i++;
	*ppos += off;
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(block, pcifunc, lfs);
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			if (flag) {
				off += scnprintf(&buf[off],
						 buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
				if (bytes_not_copied)
					goto out;

				i++;
				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}

RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
	struct rvu *rvu = filp->private;
	struct pci_dev *pdev = NULL;
	struct mac_ops *mac_ops;
	char cgx[10], lmac[10];
	struct rvu_pfvf *pfvf;
	int pf, domain, blkid;
	u8 cgx_id, lmac_id;
	u16 pcifunc;

	domain = 2;
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;
	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
		   mac_ops->name);
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
		if (!pdev)
			continue;

		cgx[0] = 0;
		lmac[0] = 0;
		pcifunc = pf << 10;
		pfvf = rvu_get_pfvf(rvu, pcifunc);

		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
			blkid = 0;
		else
			blkid = 1;

		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
				    &lmac_id);
		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
		sprintf(lmac, "LMAC%d", lmac_id);
		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);

		pci_dev_put(pdev);
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);

static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
				u16 *pcifunc)
{
	struct rvu_block *block;
	struct rvu_hwinfo *hw;

	hw = rvu->hw;
	block = &hw->block[blkaddr];

	if (lf < 0 || lf >= block->lf.max) {
		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
			 block->lf.max - 1);
		return false;
	}

	*pcifunc = block->fn_map[lf];
	if (!*pcifunc) {
		dev_warn(rvu->dev,
			 "This LF is not attached to any RVU PFFUNC\n");
		return false;
	}
	return true;
}

static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
{
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	if (!pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
					pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
	}

	if (!pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
					pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
	}
	kfree(buf);
}

/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 */
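/* A minimal usage sketch (assuming the default debugfs mount point and
 * the "npa"/"nix" block directories this driver creates under its
 * "octeontx2" root elsewhere in this file):
 *
 *   echo 0 > /sys/kernel/debug/octeontx2/npa/qsize   # select NPA LF 0
 *   cat /sys/kernel/debug/octeontx2/npa/qsize        # dump its qsizes
 */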
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
				 int blktype)
{
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = filp->private;
	switch (blktype) {
	case BLKTYPE_NPA:
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
				BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);

	return 0;
}

static ssize_t rvu_dbg_qsize_write(struct file *filp,
				   const char __user *buffer, size_t count,
				   loff_t *ppos, int blktype)
{
	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
	struct seq_file *seqfile = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp, *subtoken;
	struct rvu *rvu = seqfile->private;
	struct dentry *current_dir;
	int blkaddr;
	u16 pcifunc;
	int ret, lf;

	cmd_buf = memdup_user_nul(buffer, count);
	if (IS_ERR(cmd_buf))
		return PTR_ERR(cmd_buf);

	cmd_buf_tmp = strchr(cmd_buf, '\n');
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		count = cmd_buf_tmp - cmd_buf + 1;
	}

	cmd_buf_tmp = cmd_buf;
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
	if (cmd_buf)
		ret = -EINVAL;

	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
		dev_info(rvu->dev, "Use echo <%s-lf> > qsize\n", blk_string);
		goto qsize_write_done;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		current_dir = filp->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
				BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
		ret = -EINVAL;
		goto qsize_write_done;
	}
	if (blktype == BLKTYPE_NPA)
		rvu->rvu_dbg.npa_qsize_id = lf;
	else
		rvu->rvu_dbg.nix_qsize_id = lf;

qsize_write_done:
	kfree(cmd_buf_tmp);
	return ret ? ret : count;
}
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NPA);
}

static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);

/* Dumps given NPA Aura's context */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}

/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
"W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n", 1132 pool->avg_con, pool->fc_ena, pool->fc_stype); 1133 seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", 1134 pool->fc_hyst_bits, pool->fc_up_crossing); 1135 if (!is_rvu_otx2(rvu)) 1136 seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be); 1137 seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); 1138 1139 seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); 1140 1141 seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start); 1142 1143 seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end); 1144 1145 seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n", 1146 pool->err_int, pool->err_int_ena); 1147 seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); 1148 seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", 1149 pool->thresh_int_ena, pool->thresh_up); 1150 seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n", 1151 pool->thresh_qint_idx, pool->err_qint_idx); 1152 if (!is_rvu_otx2(rvu)) 1153 seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst); 1154 } 1155 1156 /* Reads aura/pool's ctx from admin queue */ 1157 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype) 1158 { 1159 void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp); 1160 struct npa_aq_enq_req aq_req; 1161 struct npa_aq_enq_rsp rsp; 1162 struct rvu_pfvf *pfvf; 1163 int aura, rc, max_id; 1164 int npalf, id, all; 1165 struct rvu *rvu; 1166 u16 pcifunc; 1167 1168 rvu = m->private; 1169 1170 switch (ctype) { 1171 case NPA_AQ_CTYPE_AURA: 1172 npalf = rvu->rvu_dbg.npa_aura_ctx.lf; 1173 id = rvu->rvu_dbg.npa_aura_ctx.id; 1174 all = rvu->rvu_dbg.npa_aura_ctx.all; 1175 break; 1176 1177 case NPA_AQ_CTYPE_POOL: 1178 npalf = rvu->rvu_dbg.npa_pool_ctx.lf; 1179 id = rvu->rvu_dbg.npa_pool_ctx.id; 1180 all = rvu->rvu_dbg.npa_pool_ctx.all; 1181 break; 1182 default: 1183 return -EINVAL; 1184 } 1185 1186 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc)) 1187 return -EINVAL; 1188 1189 pfvf = rvu_get_pfvf(rvu, pcifunc); 1190 if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) { 1191 seq_puts(m, "Aura context is not initialized\n"); 1192 return -EINVAL; 1193 } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) { 1194 seq_puts(m, "Pool context is not initialized\n"); 1195 return -EINVAL; 1196 } 1197 1198 memset(&aq_req, 0, sizeof(struct npa_aq_enq_req)); 1199 aq_req.hdr.pcifunc = pcifunc; 1200 aq_req.ctype = ctype; 1201 aq_req.op = NPA_AQ_INSTOP_READ; 1202 if (ctype == NPA_AQ_CTYPE_AURA) { 1203 max_id = pfvf->aura_ctx->qsize; 1204 print_npa_ctx = print_npa_aura_ctx; 1205 } else { 1206 max_id = pfvf->pool_ctx->qsize; 1207 print_npa_ctx = print_npa_pool_ctx; 1208 } 1209 1210 if (id < 0 || id >= max_id) { 1211 seq_printf(m, "Invalid %s, valid range is 0-%d\n", 1212 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", 1213 max_id - 1); 1214 return -EINVAL; 1215 } 1216 1217 if (all) 1218 id = 0; 1219 else 1220 max_id = id + 1; 1221 1222 for (aura = id; aura < max_id; aura++) { 1223 aq_req.aura_id = aura; 1224 seq_printf(m, "======%s : %d=======\n", 1225 (ctype == NPA_AQ_CTYPE_AURA) ? 
"AURA" : "POOL", 1226 aq_req.aura_id); 1227 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp); 1228 if (rc) { 1229 seq_puts(m, "Failed to read context\n"); 1230 return -EINVAL; 1231 } 1232 print_npa_ctx(m, &rsp); 1233 } 1234 return 0; 1235 } 1236 1237 static int write_npa_ctx(struct rvu *rvu, bool all, 1238 int npalf, int id, int ctype) 1239 { 1240 struct rvu_pfvf *pfvf; 1241 int max_id = 0; 1242 u16 pcifunc; 1243 1244 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc)) 1245 return -EINVAL; 1246 1247 pfvf = rvu_get_pfvf(rvu, pcifunc); 1248 1249 if (ctype == NPA_AQ_CTYPE_AURA) { 1250 if (!pfvf->aura_ctx) { 1251 dev_warn(rvu->dev, "Aura context is not initialized\n"); 1252 return -EINVAL; 1253 } 1254 max_id = pfvf->aura_ctx->qsize; 1255 } else if (ctype == NPA_AQ_CTYPE_POOL) { 1256 if (!pfvf->pool_ctx) { 1257 dev_warn(rvu->dev, "Pool context is not initialized\n"); 1258 return -EINVAL; 1259 } 1260 max_id = pfvf->pool_ctx->qsize; 1261 } 1262 1263 if (id < 0 || id >= max_id) { 1264 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n", 1265 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", 1266 max_id - 1); 1267 return -EINVAL; 1268 } 1269 1270 switch (ctype) { 1271 case NPA_AQ_CTYPE_AURA: 1272 rvu->rvu_dbg.npa_aura_ctx.lf = npalf; 1273 rvu->rvu_dbg.npa_aura_ctx.id = id; 1274 rvu->rvu_dbg.npa_aura_ctx.all = all; 1275 break; 1276 1277 case NPA_AQ_CTYPE_POOL: 1278 rvu->rvu_dbg.npa_pool_ctx.lf = npalf; 1279 rvu->rvu_dbg.npa_pool_ctx.id = id; 1280 rvu->rvu_dbg.npa_pool_ctx.all = all; 1281 break; 1282 default: 1283 return -EINVAL; 1284 } 1285 return 0; 1286 } 1287 1288 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count, 1289 const char __user *buffer, int *npalf, 1290 int *id, bool *all) 1291 { 1292 int bytes_not_copied; 1293 char *cmd_buf_tmp; 1294 char *subtoken; 1295 int ret; 1296 1297 bytes_not_copied = copy_from_user(cmd_buf, buffer, *count); 1298 if (bytes_not_copied) 1299 return -EFAULT; 1300 1301 cmd_buf[*count] = '\0'; 1302 cmd_buf_tmp = strchr(cmd_buf, '\n'); 1303 1304 if (cmd_buf_tmp) { 1305 *cmd_buf_tmp = '\0'; 1306 *count = cmd_buf_tmp - cmd_buf + 1; 1307 } 1308 1309 subtoken = strsep(&cmd_buf, " "); 1310 ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL; 1311 if (ret < 0) 1312 return ret; 1313 subtoken = strsep(&cmd_buf, " "); 1314 if (subtoken && strcmp(subtoken, "all") == 0) { 1315 *all = true; 1316 } else { 1317 ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL; 1318 if (ret < 0) 1319 return ret; 1320 } 1321 if (cmd_buf) 1322 return -EINVAL; 1323 return ret; 1324 } 1325 1326 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp, 1327 const char __user *buffer, 1328 size_t count, loff_t *ppos, int ctype) 1329 { 1330 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ? 1331 "aura" : "pool"; 1332 struct seq_file *seqfp = filp->private_data; 1333 struct rvu *rvu = seqfp->private; 1334 int npalf, id = 0, ret; 1335 bool all = false; 1336 1337 if ((*ppos != 0) || !count) 1338 return -EINVAL; 1339 1340 cmd_buf = kzalloc(count + 1, GFP_KERNEL); 1341 if (!cmd_buf) 1342 return count; 1343 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, 1344 &npalf, &id, &all); 1345 if (ret < 0) { 1346 dev_info(rvu->dev, 1347 "Usage: echo <npalf> [%s number/all] > %s_ctx\n", 1348 ctype_string, ctype_string); 1349 goto done; 1350 } else { 1351 ret = write_npa_ctx(rvu, all, npalf, id, ctype); 1352 } 1353 done: 1354 kfree(cmd_buf); 1355 return ret ? 
static void ndc_cache_stats(struct seq_file *s, int blk_addr,
			    int ctype, int transaction)
{
	u64 req, out_req, lat, cant_alloc;
	struct nix_hw *nix_hw;
	struct rvu *rvu;
	int port;

	if (blk_addr == BLKADDR_NDC_NPA0) {
		rvu = s->private;
	} else {
		nix_hw = s->private;
		rvu = nix_hw->rvu;
	}

	for (port = 0; port < NDC_MAX_PORT; port++) {
		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
						(port, ctype, transaction));
		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
						(port, ctype, transaction));
		out_req = rvu_read64(rvu, blk_addr,
				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
				     (port, ctype, transaction));
		cant_alloc = rvu_read64(rvu, blk_addr,
					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
					(port, transaction));
		seq_printf(s, "\nPort:%d\n", port);
		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n",
			   req ? lat / req : 0);
		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
	}
}

static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
{
	seq_puts(s, "\n***** CACHE mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
	seq_puts(s, "\n***** CACHE mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
	return 0;
}

static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
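/* Per-bank hit/miss counters: the number of cache banks is advertised
 * by the NDC_AF_CONST register; each bank exposes HIT/MISS performance
 * counters, dumped below.
 */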
seq_printf(s, "BANK:%d\n", bank); 1461 seq_printf(s, "\tHits:\t%lld\n", 1462 (u64)rvu_read64(rvu, blk_addr, 1463 NDC_AF_BANKX_HIT_PC(bank))); 1464 seq_printf(s, "\tMiss:\t%lld\n", 1465 (u64)rvu_read64(rvu, blk_addr, 1466 NDC_AF_BANKX_MISS_PC(bank))); 1467 } 1468 return 0; 1469 } 1470 1471 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused) 1472 { 1473 struct nix_hw *nix_hw = filp->private; 1474 int blkaddr = 0; 1475 int ndc_idx = 0; 1476 1477 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 1478 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX); 1479 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX); 1480 1481 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr); 1482 } 1483 1484 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL); 1485 1486 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused) 1487 { 1488 struct nix_hw *nix_hw = filp->private; 1489 int blkaddr = 0; 1490 int ndc_idx = 0; 1491 1492 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 1493 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX); 1494 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX); 1495 1496 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr); 1497 } 1498 1499 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL); 1500 1501 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp, 1502 void *unused) 1503 { 1504 return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); 1505 } 1506 1507 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL); 1508 1509 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp, 1510 void *unused) 1511 { 1512 struct nix_hw *nix_hw = filp->private; 1513 int ndc_idx = NPA0_U; 1514 int blkaddr = 0; 1515 1516 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 1517 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX); 1518 1519 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr); 1520 } 1521 1522 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL); 1523 1524 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp, 1525 void *unused) 1526 { 1527 struct nix_hw *nix_hw = filp->private; 1528 int ndc_idx = NPA0_U; 1529 int blkaddr = 0; 1530 1531 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 
static void print_nix_cn10k_sq_ctx(struct seq_file *m,
				   struct nix_cn10k_sq_ctx_s *sq_ctx)
{
	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
		   sq_ctx->ena, sq_ctx->qint_idx);
	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
		   sq_ctx->substream, sq_ctx->sdp_mcast);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
		   sq_ctx->cq, sq_ctx->sqe_way_mask);

	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
		   sq_ctx->default_chan, sq_ctx->sqb_count);

	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
		   sq_ctx->sqb_aura, sq_ctx->sq_int);
	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);

	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
	seq_printf(m, "W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
&rsp->sq; 1606 struct nix_hw *nix_hw = m->private; 1607 struct rvu *rvu = nix_hw->rvu; 1608 1609 if (!is_rvu_otx2(rvu)) { 1610 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); 1611 return; 1612 } 1613 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", 1614 sq_ctx->sqe_way_mask, sq_ctx->cq); 1615 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", 1616 sq_ctx->sdp_mcast, sq_ctx->substream); 1617 seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n", 1618 sq_ctx->qint_idx, sq_ctx->ena); 1619 1620 seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n", 1621 sq_ctx->sqb_count, sq_ctx->default_chan); 1622 seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n", 1623 sq_ctx->smq_rr_quantum, sq_ctx->sso_ena); 1624 seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n", 1625 sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq); 1626 1627 seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n", 1628 sq_ctx->sqe_stype, sq_ctx->sq_int_ena); 1629 seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n", 1630 sq_ctx->sq_int, sq_ctx->sqb_aura); 1631 seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count); 1632 1633 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", 1634 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); 1635 seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n", 1636 sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset); 1637 seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n", 1638 sq_ctx->smenq_offset, sq_ctx->tail_offset); 1639 seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n", 1640 sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq); 1641 seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n", 1642 sq_ctx->mnq_dis, sq_ctx->lmt_dis); 1643 seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n", 1644 sq_ctx->cq_limit, sq_ctx->max_sqe_size); 1645 1646 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); 1647 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); 1648 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); 1649 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", 1650 sq_ctx->smenq_next_sqb); 1651 1652 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); 1653 1654 seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n", 1655 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); 1656 seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n", 1657 sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps); 1658 seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n", 1659 sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1); 1660 seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total); 1661 1662 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", 1663 (u64)sq_ctx->scm_lso_rem); 1664 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); 1665 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); 1666 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", 1667 (u64)sq_ctx->dropped_octs); 1668 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", 1669 (u64)sq_ctx->dropped_pkts); 1670 } 1671 1672 static void print_nix_cn10k_rq_ctx(struct seq_file *m, 1673 struct nix_cn10k_rq_ctx_s *rq_ctx) 1674 { 1675 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", 1676 rq_ctx->ena, rq_ctx->sso_ena); 1677 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", 1678 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd); 1679 seq_printf(m, "W0: cq 
static void print_nix_cn10k_rq_ctx(struct seq_file *m,
				   struct nix_cn10k_rq_ctx_s *rq_ctx)
{
	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->lenerr_dis);
	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);

	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->sso_tt);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);

	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
	seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);

	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);

	seq_printf(m, "W4: lpb_aura_pass \t\t%d\nW4: lpb_aura_drop \t\t%d\n",
		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);

	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
		   rq_ctx->ltag, rq_ctx->good_utag);
	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);

	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
(u64)rq_ctx->pkts); 1744 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); 1745 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); 1746 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); 1747 } 1748 1749 /* Dumps given nix_rq's context */ 1750 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) 1751 { 1752 struct nix_rq_ctx_s *rq_ctx = &rsp->rq; 1753 struct nix_hw *nix_hw = m->private; 1754 struct rvu *rvu = nix_hw->rvu; 1755 1756 if (!is_rvu_otx2(rvu)) { 1757 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx); 1758 return; 1759 } 1760 1761 seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n", 1762 rq_ctx->wqe_aura, rq_ctx->substream); 1763 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", 1764 rq_ctx->cq, rq_ctx->ena_wqwd); 1765 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", 1766 rq_ctx->ipsech_ena, rq_ctx->sso_ena); 1767 seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena); 1768 1769 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", 1770 rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena); 1771 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n", 1772 rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching); 1773 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n", 1774 rq_ctx->pb_caching, rq_ctx->sso_tt); 1775 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", 1776 rq_ctx->sso_grp, rq_ctx->lpb_aura); 1777 seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura); 1778 1779 seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n", 1780 rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy); 1781 seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n", 1782 rq_ctx->xqe_imm_size, rq_ctx->later_skip); 1783 seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n", 1784 rq_ctx->first_skip, rq_ctx->lpb_sizem1); 1785 seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n", 1786 rq_ctx->spb_ena, rq_ctx->wqe_skip); 1787 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1); 1788 1789 seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n", 1790 rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop); 1791 seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n", 1792 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); 1793 seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n", 1794 rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop); 1795 seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n", 1796 rq_ctx->xqe_pass, rq_ctx->xqe_drop); 1797 1798 seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n", 1799 rq_ctx->qint_idx, rq_ctx->rq_int_ena); 1800 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n", 1801 rq_ctx->rq_int, rq_ctx->lpb_pool_pass); 1802 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n", 1803 rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass); 1804 seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop); 1805 1806 seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n", 1807 rq_ctx->flow_tagw, rq_ctx->bad_utag); 1808 seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n", 1809 rq_ctx->good_utag, rq_ctx->ltag); 1810 1811 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); 1812 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); 1813 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); 1814 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", 
/* Dumps given nix_cq's context */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}

static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}

static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx id; valid range is 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
					   const char __user *buffer,
					   size_t count, loff_t *ppos,
					   int ctype)
{
	struct seq_file *m = filp->private_data;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;
	char *cmd_buf, *ctype_string;
	int nixlf, id = 0, ret;
	bool all = false;

	if ((*ppos != 0) || !count)
		return -EINVAL;

	switch (ctype) {
	case NIX_AQ_CTYPE_SQ:
		ctype_string = "sq";
		break;
	case NIX_AQ_CTYPE_RQ:
		ctype_string = "rq";
		break;
	case NIX_AQ_CTYPE_CQ:
		ctype_string = "cq";
		break;
	default:
		return -EINVAL;
	}

	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd_buf)
		return -ENOMEM;

	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
				   &nixlf, &id, &all);
	if (ret < 0) {
		dev_info(rvu->dev,
			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
			 ctype_string, ctype_string);
		goto done;
	}

	ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
				  ctype_string, m);
done:
	kfree(cmd_buf);
	return ret ? ret : count;
}
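/* Example usage of the {sq,rq,cq}_ctx files (a sketch, not authoritative:
 * paths assume the "octeontx2" debugfs root from rvu_get_dbg_dir_name()
 * and an initialized NIX LF 0; the accepted syntax is the one reported by
 * the dev_info() above):
 *
 *   echo 0 5 > /sys/kernel/debug/octeontx2/nix/sq_ctx    # select SQ 5 of LF 0
 *   cat /sys/kernel/debug/octeontx2/nix/sq_ctx           # dump its context
 *   echo 0 all > /sys/kernel/debug/octeontx2/nix/rq_ctx  # select all RQs of LF 0
 *   cat /sys/kernel/debug/octeontx2/nix/rq_ctx
 */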
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}

static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);

static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}

static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);

static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}

static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);

static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
				 unsigned long *bmap, char *qtype)
{
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
		   qtype, buf);
	kfree(buf);
}

static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
{
	if (!pfvf->cq_ctx)
		seq_puts(filp, "cq context is not initialized\n");
	else
		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
				     "cq");

	if (!pfvf->rq_ctx)
		seq_puts(filp, "rq context is not initialized\n");
	else
		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
				     "rq");

	if (!pfvf->sq_ctx)
		seq_puts(filp, "sq context is not initialized\n");
	else
		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
				     "sq");
}

static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
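/* The write side of the qsize file is handled by the generic
 * rvu_dbg_qsize_write() helper; it is assumed to take a NIX LF index
 * (e.g. "echo 0 > qsize"), after which reading the file is expected to
 * produce the CQ/RQ/SQ context counts and enable bitmaps printed by
 * print_nix_qsize() above.
 */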
"Green" : 2170 (prof->icolor == 1) ? "Yellow" : "Red"; 2171 seq_printf(m, "W0: icolor\t\t%s\n", str); 2172 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena); 2173 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent); 2174 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent); 2175 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent); 2176 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent); 2177 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa); 2178 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa); 2179 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa); 2180 2181 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa); 2182 str = (prof->lmode == 0) ? "byte" : "packet"; 2183 seq_printf(m, "W1: lmode\t\t%s\n", str); 2184 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect); 2185 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv); 2186 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent); 2187 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa); 2188 str = (prof->gc_action == 0) ? "PASS" : 2189 (prof->gc_action == 1) ? "DROP" : "RED"; 2190 seq_printf(m, "W1: gc_action\t\t%s\n", str); 2191 str = (prof->yc_action == 0) ? "PASS" : 2192 (prof->yc_action == 1) ? "DROP" : "RED"; 2193 seq_printf(m, "W1: yc_action\t\t%s\n", str); 2194 str = (prof->rc_action == 0) ? "PASS" : 2195 (prof->rc_action == 1) ? "DROP" : "RED"; 2196 seq_printf(m, "W1: rc_action\t\t%s\n", str); 2197 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); 2198 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); 2199 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); 2200 2201 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); 2202 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum); 2203 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum); 2204 seq_printf(m, "W4: green_pkt_pass\t%lld\n", 2205 (u64)prof->green_pkt_pass); 2206 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n", 2207 (u64)prof->yellow_pkt_pass); 2208 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass); 2209 seq_printf(m, "W7: green_octs_pass\t%lld\n", 2210 (u64)prof->green_octs_pass); 2211 seq_printf(m, "W8: yellow_octs_pass\t%lld\n", 2212 (u64)prof->yellow_octs_pass); 2213 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass); 2214 seq_printf(m, "W10: green_pkt_drop\t%lld\n", 2215 (u64)prof->green_pkt_drop); 2216 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n", 2217 (u64)prof->yellow_pkt_drop); 2218 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop); 2219 seq_printf(m, "W13: green_octs_drop\t%lld\n", 2220 (u64)prof->green_octs_drop); 2221 seq_printf(m, "W14: yellow_octs_drop\t%lld\n", 2222 (u64)prof->yellow_octs_drop); 2223 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop); 2224 seq_puts(m, "==============================\n"); 2225 } 2226 2227 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused) 2228 { 2229 struct nix_hw *nix_hw = m->private; 2230 struct nix_cn10k_aq_enq_req aq_req; 2231 struct nix_cn10k_aq_enq_rsp aq_rsp; 2232 struct rvu *rvu = nix_hw->rvu; 2233 struct nix_ipolicer *ipolicer; 2234 int layer, prof_idx, idx, rc; 2235 u16 pcifunc; 2236 char *str; 2237 2238 /* Ingress policers do not exist on all platforms */ 2239 if (!nix_hw->ipolicer) 2240 return 0; 2241 2242 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 2243 if (layer == BAND_PROF_INVAL_LAYER) 2244 continue; 2245 str = (layer == BAND_PROF_LEAF_LAYER) ? 
"Leaf" : 2246 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top"; 2247 2248 seq_printf(m, "\n%s bandwidth profiles\n", str); 2249 seq_puts(m, "=======================\n"); 2250 2251 ipolicer = &nix_hw->ipolicer[layer]; 2252 2253 for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 2254 if (is_rsrc_free(&ipolicer->band_prof, idx)) 2255 continue; 2256 2257 prof_idx = (idx & 0x3FFF) | (layer << 14); 2258 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 2259 0x00, NIX_AQ_CTYPE_BANDPROF, 2260 prof_idx); 2261 if (rc) { 2262 dev_err(rvu->dev, 2263 "%s: Failed to fetch context of %s profile %d, err %d\n", 2264 __func__, str, idx, rc); 2265 return 0; 2266 } 2267 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx); 2268 pcifunc = ipolicer->pfvf_map[idx]; 2269 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 2270 seq_printf(m, "Allocated to :: PF %d\n", 2271 rvu_get_pf(pcifunc)); 2272 else 2273 seq_printf(m, "Allocated to :: PF %d VF %d\n", 2274 rvu_get_pf(pcifunc), 2275 (pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2276 print_band_prof_ctx(m, &aq_rsp.prof); 2277 } 2278 } 2279 return 0; 2280 } 2281 2282 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL); 2283 2284 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused) 2285 { 2286 struct nix_hw *nix_hw = m->private; 2287 struct nix_ipolicer *ipolicer; 2288 int layer; 2289 char *str; 2290 2291 /* Ingress policers do not exist on all platforms */ 2292 if (!nix_hw->ipolicer) 2293 return 0; 2294 2295 seq_puts(m, "\nBandwidth profile resource free count\n"); 2296 seq_puts(m, "=====================================\n"); 2297 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 2298 if (layer == BAND_PROF_INVAL_LAYER) 2299 continue; 2300 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" : 2301 (layer == BAND_PROF_MID_LAYER) ? 
"Mid " : "Top "; 2302 2303 ipolicer = &nix_hw->ipolicer[layer]; 2304 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str, 2305 ipolicer->band_prof.max, 2306 rvu_rsrc_free_count(&ipolicer->band_prof)); 2307 } 2308 seq_puts(m, "=====================================\n"); 2309 2310 return 0; 2311 } 2312 2313 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL); 2314 2315 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) 2316 { 2317 struct nix_hw *nix_hw; 2318 2319 if (!is_block_implemented(rvu->hw, blkaddr)) 2320 return; 2321 2322 if (blkaddr == BLKADDR_NIX0) { 2323 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root); 2324 nix_hw = &rvu->hw->nix[0]; 2325 } else { 2326 rvu->rvu_dbg.nix = debugfs_create_dir("nix1", 2327 rvu->rvu_dbg.root); 2328 nix_hw = &rvu->hw->nix[1]; 2329 } 2330 2331 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2332 &rvu_dbg_nix_sq_ctx_fops); 2333 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2334 &rvu_dbg_nix_rq_ctx_fops); 2335 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2336 &rvu_dbg_nix_cq_ctx_fops); 2337 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, 2338 &rvu_dbg_nix_ndc_tx_cache_fops); 2339 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, 2340 &rvu_dbg_nix_ndc_rx_cache_fops); 2341 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, 2342 &rvu_dbg_nix_ndc_tx_hits_miss_fops); 2343 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, 2344 &rvu_dbg_nix_ndc_rx_hits_miss_fops); 2345 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, 2346 &rvu_dbg_nix_qsize_fops); 2347 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, 2348 &rvu_dbg_nix_band_prof_ctx_fops); 2349 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, 2350 &rvu_dbg_nix_band_prof_rsrc_fops); 2351 } 2352 2353 static void rvu_dbg_npa_init(struct rvu *rvu) 2354 { 2355 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root); 2356 2357 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu, 2358 &rvu_dbg_npa_qsize_fops); 2359 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu, 2360 &rvu_dbg_npa_aura_ctx_fops); 2361 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, 2362 &rvu_dbg_npa_pool_ctx_fops); 2363 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, 2364 &rvu_dbg_npa_ndc_cache_fops); 2365 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, 2366 &rvu_dbg_npa_ndc_hits_miss_fops); 2367 } 2368 2369 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \ 2370 ({ \ 2371 u64 cnt; \ 2372 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ 2373 NIX_STATS_RX, &(cnt)); \ 2374 if (!err) \ 2375 seq_printf(s, "%s: %llu\n", name, cnt); \ 2376 cnt; \ 2377 }) 2378 2379 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \ 2380 ({ \ 2381 u64 cnt; \ 2382 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ 2383 NIX_STATS_TX, &(cnt)); \ 2384 if (!err) \ 2385 seq_printf(s, "%s: %llu\n", name, cnt); \ 2386 cnt; \ 2387 }) 2388 2389 static int cgx_print_stats(struct seq_file *s, int lmac_id) 2390 { 2391 struct cgx_link_user_info linfo; 2392 struct mac_ops *mac_ops; 2393 void *cgxd = s->private; 2394 u64 ucast, mcast, bcast; 2395 int stat = 0, err = 0; 2396 u64 tx_stat, rx_stat; 2397 struct rvu *rvu; 2398 2399 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, 2400 PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); 2401 if (!rvu) 2402 return -ENODEV; 2403 2404 mac_ops = 
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
	struct cgx_link_user_info linfo;
	struct mac_ops *mac_ops;
	void *cgxd = s->private;
	u64 ucast, mcast, bcast;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	struct rvu *rvu;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	mac_ops = get_mac_ops(cgxd);
	/* There may be no CGX/RPM MACs present at all */
	if (!mac_ops)
		return 0;

	/* Link status */
	seq_puts(s, "\n=======Link Status======\n\n");
	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
	if (err)
		seq_puts(s, "Failed to read link status\n");
	else
		seq_printf(s, "\nLink is %s %d Mbps\n\n",
			   linfo.link_up ? "UP" : "DOWN", linfo.speed);

	/* Rx stats */
	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
		   mac_ops->name);
	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
	if (err)
		return err;

	/* Tx stats */
	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
		   mac_ops->name);
	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
	if (err)
		return err;

	/* Rx stats */
	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
		if (err)
			return err;
		if (is_rvu_otx2(rvu))
			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
				   rx_stat);
		else
			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
				   rx_stat);
		stat++;
	}

	/* Tx stats */
	stat = 0;
	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
		if (err)
			return err;

		if (is_rvu_otx2(rvu))
			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
				   tx_stat);
		else
			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
				   tx_stat);
		stat++;
	}

	return err;
}

static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
	struct dentry *current_dir;
	char *buf;

	/* The parent debugfs directory is named "lmac<N>"; the digits after
	 * the last 'c' are the LMAC id.
	 */
	current_dir = filp->file->f_path.dentry->d_parent;
	buf = strrchr(current_dir->d_name.name, 'c');
	if (!buf)
		return -EINVAL;

	return kstrtoint(buf + 1, 10, lmac_id);
}

static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (!err)
		return cgx_print_stats(filp, lmac_id);

	return err;
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
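/* Example paths (a sketch; directory names come from rvu_dbg_cgx_init()
 * below and from mac_ops->name, i.e. "cgx" on OcteonTX2 or "rpm" on CN10K):
 *
 *   cat /sys/kernel/debug/octeontx2/cgx/cgx0/lmac0/stats
 *   cat /sys/kernel/debug/cn10k/rpm/rpm0/lmac0/stats
 */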
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* RVU PFs are assumed to sit in PCI domain 2, at bus "PF id + 1" */
	domain = 2;

	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
	seq_printf(s, "%s PF%d %9s %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX ADDRESS\n");

	for (index = 0; index < 32; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d %pM\n", index, dmac);
		}
	}

	pci_dev_put(pdev);
	return 0;
}

static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int err, lmac_id;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (!err)
		return cgx_print_dmac_flt(filp, lmac_id);

	return err;
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);

static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	unsigned long lmac_bmap;
	int i, lmac_id;
	char dname[20];
	void *cgx;

	if (!cgx_get_cgxcnt_max())
		return;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	if (!mac_ops)
		return;

	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
						   rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgx);
		/* cgx debugfs dir */
		sprintf(dname, "%s%d", mac_ops->name, i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);

		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
					    cgx, &rvu_dbg_cgx_stat_fops);
			debugfs_create_file("mac_filter", 0600,
					    rvu->rvu_dbg.lmac, cgx,
					    &rvu_dbg_cgx_dmac_flt_fops);
		}
	}
}
seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", 2650 rvu_get_pf(pcifunc), 2651 (pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2652 2653 if (entry_acnt) { 2654 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt); 2655 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt); 2656 } 2657 if (cntr_acnt) { 2658 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt); 2659 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt); 2660 } 2661 } 2662 2663 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) 2664 { 2665 struct rvu *rvu = filp->private; 2666 int pf, vf, numvfs, blkaddr; 2667 struct npc_mcam *mcam; 2668 u16 pcifunc, counters; 2669 u64 cfg; 2670 2671 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2672 if (blkaddr < 0) 2673 return -ENODEV; 2674 2675 mcam = &rvu->hw->mcam; 2676 counters = rvu->hw->npc_counters; 2677 2678 seq_puts(filp, "\nNPC MCAM info:\n"); 2679 /* MCAM keywidth on receive and transmit sides */ 2680 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); 2681 cfg = (cfg >> 32) & 0x07; 2682 seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? 2683 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? 2684 "224bits" : "448bits")); 2685 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX)); 2686 cfg = (cfg >> 32) & 0x07; 2687 seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? 2688 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? 2689 "224bits" : "448bits")); 2690 2691 mutex_lock(&mcam->lock); 2692 /* MCAM entries */ 2693 seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries); 2694 seq_printf(filp, "\t\t Reserved \t: %d\n", 2695 mcam->total_entries - mcam->bmap_entries); 2696 seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt); 2697 2698 /* MCAM counters */ 2699 seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters); 2700 seq_printf(filp, "\t\t Reserved \t: %d\n", 2701 counters - mcam->counters.max); 2702 seq_printf(filp, "\t\t Available \t: %d\n", 2703 rvu_rsrc_free_count(&mcam->counters)); 2704 2705 if (mcam->bmap_entries == mcam->bmap_fcnt) { 2706 mutex_unlock(&mcam->lock); 2707 return 0; 2708 } 2709 2710 seq_puts(filp, "\n\t\t Current allocation\n"); 2711 seq_puts(filp, "\t\t====================\n"); 2712 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2713 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2714 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); 2715 2716 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2717 numvfs = (cfg >> 12) & 0xFF; 2718 for (vf = 0; vf < numvfs; vf++) { 2719 pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); 2720 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); 2721 } 2722 } 2723 2724 mutex_unlock(&mcam->lock); 2725 return 0; 2726 } 2727 2728 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL); 2729 2730 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, 2731 void *unused) 2732 { 2733 struct rvu *rvu = filp->private; 2734 struct npc_mcam *mcam; 2735 int blkaddr; 2736 2737 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2738 if (blkaddr < 0) 2739 return -ENODEV; 2740 2741 mcam = &rvu->hw->mcam; 2742 2743 seq_puts(filp, "\nNPC MCAM RX miss action stats\n"); 2744 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr, 2745 rvu_read64(rvu, blkaddr, 2746 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr))); 2747 2748 return 0; 2749 } 2750 2751 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); 2752 2753 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, 2754 struct rvu_npc_mcam_rule 
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_LXMB:
			if (rule->lxmb == 1)
				seq_puts(s, "\tL2M nibble is set\n");
			else
				seq_puts(s, "\tL2B nibble is set\n");
			break;
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_IPFRAG_IPV6:
			seq_printf(s, "0x%x ", rule->packet.next_header);
			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
			break;
		case NPC_IPFRAG_IPV4:
			seq_printf(s, "0x%x ", rule->packet.ip_flag);
			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			seq_puts(s, "\n");
			break;
		}
	}
}
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}

static const char *rvu_dbg_get_intf_name(int intf)
{
	switch (intf) {
	case NIX_INTFX_RX(0):
		return "NIX0_RX";
	case NIX_INTFX_RX(1):
		return "NIX1_RX";
	case NIX_INTFX_TX(0):
		return "NIX0_TX";
	case NIX_INTFX_TX(1):
		return "NIX1_TX";
	default:
		break;
	}

	return "unknown";
}
"yes" : "no"); 2950 2951 if (!iter->has_cntr) 2952 continue; 2953 seq_printf(s, "\tcounter: %d\n", iter->cntr); 2954 2955 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr)); 2956 seq_printf(s, "\thits: %lld\n", hits); 2957 } 2958 mutex_unlock(&mcam->lock); 2959 2960 return 0; 2961 } 2962 2963 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL); 2964 2965 static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused) 2966 { 2967 struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 }; 2968 struct npc_exact_table_entry *cam_entry; 2969 struct npc_exact_table *table; 2970 struct rvu *rvu = s->private; 2971 int i, j; 2972 2973 u8 bitmap = 0; 2974 2975 table = rvu->hw->table; 2976 2977 mutex_lock(&table->lock); 2978 2979 /* Check if there is at least one entry in mem table */ 2980 if (!table->mem_tbl_entry_cnt) 2981 goto dump_cam_table; 2982 2983 /* Print table headers */ 2984 seq_puts(s, "\n\tExact Match MEM Table\n"); 2985 seq_puts(s, "Index\t"); 2986 2987 for (i = 0; i < table->mem_table.ways; i++) { 2988 mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i], 2989 struct npc_exact_table_entry, list); 2990 2991 seq_printf(s, "Way-%d\t\t\t\t\t", i); 2992 } 2993 2994 seq_puts(s, "\n"); 2995 for (i = 0; i < table->mem_table.ways; i++) 2996 seq_puts(s, "\tChan MAC \t"); 2997 2998 seq_puts(s, "\n\n"); 2999 3000 /* Print mem table entries */ 3001 for (i = 0; i < table->mem_table.depth; i++) { 3002 bitmap = 0; 3003 for (j = 0; j < table->mem_table.ways; j++) { 3004 if (!mem_entry[j]) 3005 continue; 3006 3007 if (mem_entry[j]->index != i) 3008 continue; 3009 3010 bitmap |= BIT(j); 3011 } 3012 3013 /* No valid entries */ 3014 if (!bitmap) 3015 continue; 3016 3017 seq_printf(s, "%d\t", i); 3018 for (j = 0; j < table->mem_table.ways; j++) { 3019 if (!(bitmap & BIT(j))) { 3020 seq_puts(s, "nil\t\t\t\t\t"); 3021 continue; 3022 } 3023 3024 seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan, 3025 mem_entry[j]->mac); 3026 mem_entry[j] = list_next_entry(mem_entry[j], list); 3027 } 3028 seq_puts(s, "\n"); 3029 } 3030 3031 dump_cam_table: 3032 3033 if (!table->cam_tbl_entry_cnt) 3034 goto done; 3035 3036 seq_puts(s, "\n\tExact Match CAM Table\n"); 3037 seq_puts(s, "index\tchan\tMAC\n"); 3038 3039 /* Traverse cam table entries */ 3040 list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) { 3041 seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan, 3042 cam_entry->mac); 3043 } 3044 3045 done: 3046 mutex_unlock(&table->lock); 3047 return 0; 3048 } 3049 3050 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL); 3051 3052 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused) 3053 { 3054 struct npc_exact_table *table; 3055 struct rvu *rvu = s->private; 3056 int i; 3057 3058 table = rvu->hw->table; 3059 3060 seq_puts(s, "\n\tExact Table Info\n"); 3061 seq_printf(s, "Exact Match Feature : %s\n", 3062 rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable"); 3063 if (!rvu->hw->cap.npc_exact_match_enabled) 3064 return 0; 3065 3066 seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n"); 3067 for (i = 0; i < table->num_drop_rules; i++) 3068 seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]); 3069 3070 seq_puts(s, "\nMcam Index\tPromisc Mode Status\n"); 3071 for (i = 0; i < table->num_drop_rules; i++) 3072 seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? 
"on" : "off"); 3073 3074 seq_puts(s, "\n\tMEM Table Info\n"); 3075 seq_printf(s, "Ways : %d\n", table->mem_table.ways); 3076 seq_printf(s, "Depth : %d\n", table->mem_table.depth); 3077 seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask); 3078 seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask); 3079 seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset); 3080 3081 seq_puts(s, "\n\tCAM Table Info\n"); 3082 seq_printf(s, "Depth : %d\n", table->cam_table.depth); 3083 3084 return 0; 3085 } 3086 3087 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL); 3088 3089 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused) 3090 { 3091 struct npc_exact_table *table; 3092 struct rvu *rvu = s->private; 3093 struct npc_key_field *field; 3094 u16 chan, pcifunc; 3095 int blkaddr, i; 3096 u64 cfg, cam1; 3097 char *str; 3098 3099 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3100 table = rvu->hw->table; 3101 3102 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN]; 3103 3104 seq_puts(s, "\n\t Exact Hit on drop status\n"); 3105 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n"); 3106 3107 for (i = 0; i < table->num_drop_rules; i++) { 3108 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i); 3109 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0)); 3110 3111 /* channel will be always in keyword 0 */ 3112 cam1 = rvu_read64(rvu, blkaddr, 3113 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1)); 3114 chan = field->kw_mask[0] & cam1; 3115 3116 str = (cfg & 1) ? "enabled" : "disabled"; 3117 3118 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i, 3119 rvu_read64(rvu, blkaddr, 3120 NPC_AF_MATCH_STATX(table->counter_idx[i])), 3121 chan, str); 3122 } 3123 3124 return 0; 3125 } 3126 3127 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL); 3128 3129 static void rvu_dbg_npc_init(struct rvu *rvu) 3130 { 3131 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); 3132 3133 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu, 3134 &rvu_dbg_npc_mcam_info_fops); 3135 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu, 3136 &rvu_dbg_npc_mcam_rules_fops); 3137 3138 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu, 3139 &rvu_dbg_npc_rx_miss_act_fops); 3140 3141 if (!rvu->hw->cap.npc_exact_match_enabled) 3142 return; 3143 3144 debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu, 3145 &rvu_dbg_npc_exact_entries_fops); 3146 3147 debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu, 3148 &rvu_dbg_npc_exact_info_fops); 3149 3150 debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu, 3151 &rvu_dbg_npc_exact_drop_cnt_fops); 3152 3153 } 3154 3155 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type) 3156 { 3157 struct cpt_ctx *ctx = filp->private; 3158 u64 busy_sts = 0, free_sts = 0; 3159 u32 e_min = 0, e_max = 0, e, i; 3160 u16 max_ses, max_ies, max_aes; 3161 struct rvu *rvu = ctx->rvu; 3162 int blkaddr = ctx->blkaddr; 3163 u64 reg; 3164 3165 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); 3166 max_ses = reg & 0xffff; 3167 max_ies = (reg >> 16) & 0xffff; 3168 max_aes = (reg >> 32) & 0xffff; 3169 3170 switch (eng_type) { 3171 case CPT_AE_TYPE: 3172 e_min = max_ses + max_ies; 3173 e_max = max_ses + max_ies + max_aes; 3174 break; 3175 case CPT_SE_TYPE: 3176 e_min = 0; 3177 e_max = max_ses; 3178 break; 3179 case CPT_IE_TYPE: 3180 e_min = max_ses; 3181 e_max = max_ses + max_ies; 3182 break; 3183 default: 3184 return -EINVAL; 3185 } 3186 3187 for (e = e_min, i = 0; e < 
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1) /* engine busy */
			busy_sts |= 1ULL << i;

		if (reg & 0x2) /* engine free */
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}

static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);

static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);

static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);

static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u32 e_max, e;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	e_max = max_ses + max_ies + max_aes;

	seq_puts(filp, "===========================================\n");
	for (e = 0; e < e_max; e++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
		seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
			   reg & 0xff);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
		seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
			   reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
		seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
			   reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);

static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				 (lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);

static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc %llu\n", reg);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);

static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
{
	struct cpt_ctx *ctx;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	if (blkaddr == BLKADDR_CPT0) {
		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
		ctx = &rvu->rvu_dbg.cpt_ctx[0];
		ctx->blkaddr = BLKADDR_CPT0;
		ctx->rvu = rvu;
	} else {
		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
						      rvu->rvu_dbg.root);
		ctx = &rvu->rvu_dbg.cpt_ctx[1];
		ctx->blkaddr = BLKADDR_CPT1;
		ctx->rvu = rvu;
	}

	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_pc_fops);
	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_ae_sts_fops);
	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_se_sts_fops);
	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_ie_sts_fops);
	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_engines_info_fops);
	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_lfs_info_fops);
	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_err_info_fops);
}

static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	if (!is_rvu_otx2(rvu))
		return "cn10k";

	return "octeontx2";
}
void rvu_dbg_init(struct rvu *rvu)
{
	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);

	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
			    &rvu_dbg_rsrc_status_fops);

	if (!is_rvu_otx2(rvu))
		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_lmtst_map_table_fops);

	if (!cgx_get_cgxcnt_max())
		goto create;

	if (is_rvu_otx2(rvu))
		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
	else
		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);

create:
	rvu_dbg_npa_init(rvu);
	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
	rvu_dbg_cgx_init(rvu);
	rvu_dbg_npc_init(rvu);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
	rvu_dbg_mcs_init(rvu);
}

void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}

#endif /* CONFIG_DEBUG_FS */