// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
#include "enetc.h"

static const u32 enetc_si_regs[] = {
	ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR,
	ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR,
	ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1,
	ENETC_SIUEFDCR
};

static const u32 enetc_txbdr_regs[] = {
	ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1,
	ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER, ENETC_TBICR0,
	ENETC_TBICR1
};

static const u32 enetc_rxbdr_regs[] = {
	ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0,
	ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBIER, ENETC_RBICR0,
	ENETC_RBICR1
};

static const u32 enetc_port_regs[] = {
	ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0),
	ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1,
	ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0),
	ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
};

static int enetc_get_reglen(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int len;

	len = ARRAY_SIZE(enetc_si_regs);
	len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings;
	len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings;

	if (hw->port)
		len += ARRAY_SIZE(enetc_port_regs);

	len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */

	return len;
}

static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
			   void *regbuf)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 *buf = (u32 *)regbuf;
	int i, j;
	u32 addr;

	for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) {
		*buf++ = enetc_si_regs[i];
		*buf++ = enetc_rd(hw, enetc_si_regs[i]);
	}

	for (i = 0; i < priv->num_tx_rings; i++) {
		for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) {
			addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]);

			*buf++ = addr;
			*buf++ = enetc_rd(hw, addr);
		}
	}

	for (i = 0; i < priv->num_rx_rings; i++) {
		for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) {
			addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]);

			*buf++ = addr;
			*buf++ = enetc_rd(hw, addr);
		}
	}

	if (!hw->port)
		return;

	for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) {
		addr = ENETC_PORT_BASE + enetc_port_regs[i];
		*buf++ = addr;
		*buf++ = enetc_rd(hw, addr);
	}
}

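/* Station interface (SI) level statistics, available to both PF and VF.
 * The counters are read with enetc_rd64(); the RBDCR(n) entries report the
 * number of frames discarded on each Rx ring.
 */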
static const struct {
	int reg;
	char name[ETH_GSTRING_LEN];
} enetc_si_counters[] = {
	{ ENETC_SIROCT, "SI rx octets" },
	{ ENETC_SIRFRM, "SI rx frames" },
	{ ENETC_SIRUCA, "SI rx u-cast frames" },
	{ ENETC_SIRMCA, "SI rx m-cast frames" },
	{ ENETC_SITOCT, "SI tx octets" },
	{ ENETC_SITFRM, "SI tx frames" },
	{ ENETC_SITUCA, "SI tx u-cast frames" },
	{ ENETC_SITMCA, "SI tx m-cast frames" },
	{ ENETC_RBDCR(0), "Rx ring 0 discarded frames" },
	{ ENETC_RBDCR(1), "Rx ring 1 discarded frames" },
	{ ENETC_RBDCR(2), "Rx ring 2 discarded frames" },
	{ ENETC_RBDCR(3), "Rx ring 3 discarded frames" },
	{ ENETC_RBDCR(4), "Rx ring 4 discarded frames" },
	{ ENETC_RBDCR(5), "Rx ring 5 discarded frames" },
	{ ENETC_RBDCR(6), "Rx ring 6 discarded frames" },
	{ ENETC_RBDCR(7), "Rx ring 7 discarded frames" },
	{ ENETC_RBDCR(8), "Rx ring 8 discarded frames" },
	{ ENETC_RBDCR(9), "Rx ring 9 discarded frames" },
	{ ENETC_RBDCR(10), "Rx ring 10 discarded frames" },
	{ ENETC_RBDCR(11), "Rx ring 11 discarded frames" },
	{ ENETC_RBDCR(12), "Rx ring 12 discarded frames" },
	{ ENETC_RBDCR(13), "Rx ring 13 discarded frames" },
	{ ENETC_RBDCR(14), "Rx ring 14 discarded frames" },
	{ ENETC_RBDCR(15), "Rx ring 15 discarded frames" },
};

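/* Port and MAC level statistics, only exposed when the station interface is
 * a PF, since they are read from the port register block via enetc_port_rd().
 */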
static const struct {
	int reg;
	char name[ETH_GSTRING_LEN];
} enetc_port_counters[] = {
	{ ENETC_PM_REOCT(0),	"MAC rx ethernet octets" },
	{ ENETC_PM_RALN(0),	"MAC rx alignment errors" },
	{ ENETC_PM_RXPF(0),	"MAC rx valid pause frames" },
	{ ENETC_PM_RFRM(0),	"MAC rx valid frames" },
	{ ENETC_PM_RFCS(0),	"MAC rx fcs errors" },
	{ ENETC_PM_RVLAN(0),	"MAC rx VLAN frames" },
	{ ENETC_PM_RERR(0),	"MAC rx frame errors" },
	{ ENETC_PM_RUCA(0),	"MAC rx unicast frames" },
	{ ENETC_PM_RMCA(0),	"MAC rx multicast frames" },
	{ ENETC_PM_RBCA(0),	"MAC rx broadcast frames" },
	{ ENETC_PM_RDRP(0),	"MAC rx dropped packets" },
	{ ENETC_PM_RPKT(0),	"MAC rx packets" },
	{ ENETC_PM_RUND(0),	"MAC rx undersized packets" },
	{ ENETC_PM_R64(0),	"MAC rx 64 byte packets" },
	{ ENETC_PM_R127(0),	"MAC rx 65-127 byte packets" },
	{ ENETC_PM_R255(0),	"MAC rx 128-255 byte packets" },
	{ ENETC_PM_R511(0),	"MAC rx 256-511 byte packets" },
	{ ENETC_PM_R1023(0),	"MAC rx 512-1023 byte packets" },
	{ ENETC_PM_R1522(0),	"MAC rx 1024-1522 byte packets" },
	{ ENETC_PM_R1523X(0),	"MAC rx 1523 to max-octet packets" },
	{ ENETC_PM_ROVR(0),	"MAC rx oversized packets" },
	{ ENETC_PM_RJBR(0),	"MAC rx jabber packets" },
	{ ENETC_PM_RFRG(0),	"MAC rx fragment packets" },
	{ ENETC_PM_RCNP(0),	"MAC rx control packets" },
	{ ENETC_PM_RDRNTP(0),	"MAC rx fifo drop" },
	{ ENETC_PM_TEOCT(0),	"MAC tx ethernet octets" },
	{ ENETC_PM_TOCT(0),	"MAC tx octets" },
	{ ENETC_PM_TCRSE(0),	"MAC tx carrier sense errors" },
	{ ENETC_PM_TXPF(0),	"MAC tx valid pause frames" },
	{ ENETC_PM_TFRM(0),	"MAC tx frames" },
	{ ENETC_PM_TFCS(0),	"MAC tx fcs errors" },
	{ ENETC_PM_TVLAN(0),	"MAC tx VLAN frames" },
	{ ENETC_PM_TERR(0),	"MAC tx frame errors" },
	{ ENETC_PM_TUCA(0),	"MAC tx unicast frames" },
	{ ENETC_PM_TMCA(0),	"MAC tx multicast frames" },
	{ ENETC_PM_TBCA(0),	"MAC tx broadcast frames" },
	{ ENETC_PM_TPKT(0),	"MAC tx packets" },
	{ ENETC_PM_TUND(0),	"MAC tx undersized packets" },
	{ ENETC_PM_T64(0),	"MAC tx 64 byte packets" },
	{ ENETC_PM_T127(0),	"MAC tx 65-127 byte packets" },
	{ ENETC_PM_T255(0),	"MAC tx 128-255 byte packets" },
	{ ENETC_PM_T511(0),	"MAC tx 256-511 byte packets" },
	{ ENETC_PM_T1023(0),	"MAC tx 512-1023 byte packets" },
	{ ENETC_PM_T1522(0),	"MAC tx 1024-1522 byte packets" },
	{ ENETC_PM_T1523X(0),	"MAC tx 1523 to max-octet packets" },
	{ ENETC_PM_TCNP(0),	"MAC tx control packets" },
	{ ENETC_PM_TDFR(0),	"MAC tx deferred packets" },
	{ ENETC_PM_TMCOL(0),	"MAC tx multiple collisions" },
	{ ENETC_PM_TSCOL(0),	"MAC tx single collisions" },
	{ ENETC_PM_TLCOL(0),	"MAC tx late collisions" },
	{ ENETC_PM_TECOL(0),	"MAC tx excessive collisions" },
	{ ENETC_UFDMF,		"SI MAC nomatch u-cast discards" },
	{ ENETC_MFDMF,		"SI MAC nomatch m-cast discards" },
	{ ENETC_PBFDSIR,	"SI MAC nomatch b-cast discards" },
	{ ENETC_PUFDVFR,	"SI VLAN nomatch u-cast discards" },
	{ ENETC_PMFDVFR,	"SI VLAN nomatch m-cast discards" },
	{ ENETC_PBFDVFR,	"SI VLAN nomatch b-cast discards" },
	{ ENETC_PFDMSAPR,	"SI pruning discarded frames" },
	{ ENETC_PICDR(0),	"ICM DR0 discarded frames" },
	{ ENETC_PICDR(1),	"ICM DR1 discarded frames" },
	{ ENETC_PICDR(2),	"ICM DR2 discarded frames" },
	{ ENETC_PICDR(3),	"ICM DR3 discarded frames" },
};

static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
	"Rx ring %2d frames",
	"Rx ring %2d alloc errors",
	"Rx ring %2d XDP drops",
	"Rx ring %2d recycles",
	"Rx ring %2d recycle failures",
	"Rx ring %2d redirects",
	"Rx ring %2d redirect failures",
};

static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
	"Tx ring %2d frames",
	"Tx ring %2d XDP frames",
	"Tx ring %2d XDP drops",
	"Tx window drop %2d frames",
};

static int enetc_get_sset_count(struct net_device *ndev, int sset)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int len;

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	len = ARRAY_SIZE(enetc_si_counters) +
	      ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
	      ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings;

	if (!enetc_si_is_pf(priv->si))
		return len;

	len += ARRAY_SIZE(enetc_port_counters);

	return len;
}

static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	u8 *p = data;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
			strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < priv->num_tx_rings; i++) {
			for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
				snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
					 i);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < priv->num_rx_rings; i++) {
			for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
				snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
					 i);
				p += ETH_GSTRING_LEN;
			}
		}

		if (!enetc_si_is_pf(priv->si))
			break;

		for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
			strscpy(p, enetc_port_counters[i].name,
				ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void enetc_get_ethtool_stats(struct net_device *ndev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int i, o = 0;

	for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
		data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);

	for (i = 0; i < priv->num_tx_rings; i++) {
		data[o++] = priv->tx_ring[i]->stats.packets;
		data[o++] = priv->tx_ring[i]->stats.xdp_tx;
		data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops;
		data[o++] = priv->tx_ring[i]->stats.win_drop;
	}

	for (i = 0; i < priv->num_rx_rings; i++) {
		data[o++] = priv->rx_ring[i]->stats.packets;
		data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
		data[o++] = priv->rx_ring[i]->stats.xdp_drops;
		data[o++] = priv->rx_ring[i]->stats.recycles;
		data[o++] = priv->rx_ring[i]->stats.recycle_failures;
		data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
		data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
	}

	if (!enetc_si_is_pf(priv->si))
		return;

	for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
		data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}

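/* The helpers below take a "mac" selector: 0 addresses the express MAC
 * (eMAC) counters, 1 the preemptible MAC (pMAC) counters, the latter only
 * being reported when the ENETC_SI_F_QBU feature is present.
 */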
static void enetc_pause_stats(struct enetc_hw *hw, int mac,
			      struct ethtool_pause_stats *pause_stats)
{
	pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
	pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
}

static void enetc_get_pause_stats(struct net_device *ndev,
				  struct ethtool_pause_stats *pause_stats)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_si *si = priv->si;

	switch (pause_stats->src) {
	case ETHTOOL_MAC_STATS_SRC_EMAC:
		enetc_pause_stats(hw, 0, pause_stats);
		break;
	case ETHTOOL_MAC_STATS_SRC_PMAC:
		if (si->hw_features & ENETC_SI_F_QBU)
			enetc_pause_stats(hw, 1, pause_stats);
		break;
	case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
		ethtool_aggregate_pause_stats(ndev, pause_stats);
		break;
	}
}

static void enetc_mac_stats(struct enetc_hw *hw, int mac,
			    struct ethtool_eth_mac_stats *s)
{
	s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
	s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
	s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
	s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
	s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
	s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
	s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
	s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
	s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
	s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
	s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
	s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
	s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
	s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
	s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
	s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
	s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
	s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
}

static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
			     struct ethtool_eth_ctrl_stats *s)
{
	s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
	s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
}

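/* Packet size buckets advertised to ethtool; they mirror the fixed bins of
 * the ENETC_PM_Rxxx/Txxx frame counters read in enetc_rmon_stats().
 */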
static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
	{   64,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1522 },
	{ 1523, ENETC_MAC_MAXFRM_SIZE },
	{},
};

static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
			     struct ethtool_rmon_stats *s)
{
	s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
	s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
	s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
	s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));

	s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
	s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
	s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
	s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
	s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
	s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
	s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));

	s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
	s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
	s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
	s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
	s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
	s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
	s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
}

static void enetc_get_eth_mac_stats(struct net_device *ndev,
				    struct ethtool_eth_mac_stats *mac_stats)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_si *si = priv->si;

	switch (mac_stats->src) {
	case ETHTOOL_MAC_STATS_SRC_EMAC:
		enetc_mac_stats(hw, 0, mac_stats);
		break;
	case ETHTOOL_MAC_STATS_SRC_PMAC:
		if (si->hw_features & ENETC_SI_F_QBU)
			enetc_mac_stats(hw, 1, mac_stats);
		break;
	case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
		ethtool_aggregate_mac_stats(ndev, mac_stats);
		break;
	}
}

static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
				     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_si *si = priv->si;

	switch (ctrl_stats->src) {
	case ETHTOOL_MAC_STATS_SRC_EMAC:
		enetc_ctrl_stats(hw, 0, ctrl_stats);
		break;
	case ETHTOOL_MAC_STATS_SRC_PMAC:
		if (si->hw_features & ENETC_SI_F_QBU)
			enetc_ctrl_stats(hw, 1, ctrl_stats);
		break;
	case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
		ethtool_aggregate_ctrl_stats(ndev, ctrl_stats);
		break;
	}
}

static void enetc_get_rmon_stats(struct net_device *ndev,
				 struct ethtool_rmon_stats *rmon_stats,
				 const struct ethtool_rmon_hist_range **ranges)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_si *si = priv->si;

	*ranges = enetc_rmon_ranges;

	switch (rmon_stats->src) {
	case ETHTOOL_MAC_STATS_SRC_EMAC:
		enetc_rmon_stats(hw, 0, rmon_stats);
		break;
	case ETHTOOL_MAC_STATS_SRC_PMAC:
		if (si->hw_features & ENETC_SI_F_QBU)
			enetc_rmon_stats(hw, 1, rmon_stats);
		break;
	case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
		ethtool_aggregate_rmon_stats(ndev, rmon_stats);
		break;
	}
}

#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
			  RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
{
	static const u32 rsshash[] = {
			[TCP_V4_FLOW]    = ENETC_RSSHASH_L4,
			[UDP_V4_FLOW]    = ENETC_RSSHASH_L4,
			[SCTP_V4_FLOW]   = ENETC_RSSHASH_L4,
			[AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3,
			[IPV4_FLOW]      = ENETC_RSSHASH_L3,
			[TCP_V6_FLOW]    = ENETC_RSSHASH_L4,
			[UDP_V6_FLOW]    = ENETC_RSSHASH_L4,
			[SCTP_V6_FLOW]   = ENETC_RSSHASH_L4,
			[AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3,
			[IPV6_FLOW]      = ENETC_RSSHASH_L3,
			[ETHER_FLOW]     = 0,
	};

	if (rxnfc->flow_type >= ARRAY_SIZE(rsshash))
		return -EINVAL;

	rxnfc->data = rsshash[rxnfc->flow_type];

	return 0;
}

/* current HW spec does byte reversal on everything including MAC addresses */
static void ether_addr_copy_swap(u8 *dst, const u8 *src)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		dst[i] = src[ETH_ALEN - i - 1];
}

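/* Translate an ethtool flow spec into a flow steering entry (RFSE) and
 * program it through enetc_set_fs_entry(). When en is false, a zeroed
 * entry is written instead, which removes the rule at that location.
 */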
static int enetc_set_cls_entry(struct enetc_si *si,
			       struct ethtool_rx_flow_spec *fs, bool en)
{
	struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m;
	struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m;
	struct ethhdr *eth_h, *eth_m;
	struct enetc_cmd_rfse rfse = { {0} };

	if (!en)
		goto done;

	switch (fs->flow_type & 0xff) {
	case TCP_V4_FLOW:
		l4ip4_h = &fs->h_u.tcp_ip4_spec;
		l4ip4_m = &fs->m_u.tcp_ip4_spec;
		goto l4ip4;
	case UDP_V4_FLOW:
		l4ip4_h = &fs->h_u.udp_ip4_spec;
		l4ip4_m = &fs->m_u.udp_ip4_spec;
		goto l4ip4;
	case SCTP_V4_FLOW:
		l4ip4_h = &fs->h_u.sctp_ip4_spec;
		l4ip4_m = &fs->m_u.sctp_ip4_spec;
l4ip4:
		rfse.sip_h[0] = l4ip4_h->ip4src;
		rfse.sip_m[0] = l4ip4_m->ip4src;
		rfse.dip_h[0] = l4ip4_h->ip4dst;
		rfse.dip_m[0] = l4ip4_m->ip4dst;
		rfse.sport_h = ntohs(l4ip4_h->psrc);
		rfse.sport_m = ntohs(l4ip4_m->psrc);
		rfse.dport_h = ntohs(l4ip4_h->pdst);
		rfse.dport_m = ntohs(l4ip4_m->pdst);
		if (l4ip4_m->tos)
			netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
		rfse.ethtype_h = ETH_P_IP; /* IPv4 */
		rfse.ethtype_m = 0xffff;
		break;
	case IP_USER_FLOW:
		l3ip4_h = &fs->h_u.usr_ip4_spec;
		l3ip4_m = &fs->m_u.usr_ip4_spec;

		rfse.sip_h[0] = l3ip4_h->ip4src;
		rfse.sip_m[0] = l3ip4_m->ip4src;
		rfse.dip_h[0] = l3ip4_h->ip4dst;
		rfse.dip_m[0] = l3ip4_m->ip4dst;
		if (l3ip4_m->tos)
			netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
		rfse.ethtype_h = ETH_P_IP; /* IPv4 */
		rfse.ethtype_m = 0xffff;
		break;
	case ETHER_FLOW:
		eth_h = &fs->h_u.ether_spec;
		eth_m = &fs->m_u.ether_spec;

		ether_addr_copy_swap(rfse.smac_h, eth_h->h_source);
		ether_addr_copy_swap(rfse.smac_m, eth_m->h_source);
		ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest);
		ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest);
		rfse.ethtype_h = ntohs(eth_h->h_proto);
		rfse.ethtype_m = ntohs(eth_m->h_proto);
		break;
	default:
		return -EOPNOTSUPP;
	}

	rfse.mode |= ENETC_RFSE_EN;
	if (fs->ring_cookie != RX_CLS_FLOW_DISC) {
		rfse.mode |= ENETC_RFSE_MODE_BD;
		rfse.result = fs->ring_cookie;
	}
done:
	return enetc_set_fs_entry(si, &rfse, fs->location);
}

static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
			   u32 *rule_locs)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i, j;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXRINGS:
		rxnfc->data = priv->num_rx_rings;
		break;
	case ETHTOOL_GRXFH:
		/* get RSS hash config */
		return enetc_get_rsshash(rxnfc);
	case ETHTOOL_GRXCLSRLCNT:
		/* total number of entries */
		rxnfc->data = priv->si->num_fs_entries;
		/* number of entries in use */
		rxnfc->rule_cnt = 0;
		for (i = 0; i < priv->si->num_fs_entries; i++)
			if (priv->cls_rules[i].used)
				rxnfc->rule_cnt++;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= priv->si->num_fs_entries)
			return -EINVAL;

		/* get entry x */
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		/* total number of entries */
		rxnfc->data = priv->si->num_fs_entries;
		/* array of indexes of used entries */
		j = 0;
		for (i = 0; i < priv->si->num_fs_entries; i++) {
			if (!priv->cls_rules[i].used)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		/* number of entries in use */
		rxnfc->rule_cnt = j;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

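/* Insert or delete flow steering rules. The rule location must fall within
 * the SI's flow steering table, and an inserted rule must target either a
 * valid Rx ring or RX_CLS_FLOW_DISC (drop).
 */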
static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (rxnfc->fs.location >= priv->si->num_fs_entries)
			return -EINVAL;

		if (rxnfc->fs.ring_cookie >= priv->num_rx_rings &&
		    rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC)
			return -EINVAL;

		err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true);
		if (err)
			return err;
		priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs;
		priv->cls_rules[rxnfc->fs.location].used = 1;
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (rxnfc->fs.location >= priv->si->num_fs_entries)
			return -EINVAL;

		err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false);
		if (err)
			return err;
		priv->cls_rules[rxnfc->fs.location].used = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static u32 enetc_get_rxfh_key_size(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	/* return the size of the RX flow hash key.  PF only */
	return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0;
}

static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	/* return the size of the RX flow hash indirection table */
	return priv->si->num_rss;
}

static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int err = 0, i;

	/* return hash function */
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	/* return hash key */
	if (key && hw->port)
		for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
			((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));

	/* return RSS table */
	if (indir)
		err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);

	return err;
}

void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
{
	int i;

	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
		enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);

static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int err = 0;

	/* set hash key, if PF */
	if (key && hw->port)
		enetc_set_rss_key(hw, key);

	/* set RSS table */
	if (indir)
		err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);

	return err;
}

static void enetc_get_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	ring->rx_pending = priv->rx_bd_count;
	ring->tx_pending = priv->tx_bd_count;

	/* do some h/w sanity checks for BDR length */
	if (netif_running(ndev)) {
		struct enetc_hw *hw = &priv->si->hw;
		u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR);

		if (val != priv->rx_bd_count)
			netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val);

		val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR);

		if (val != priv->tx_bd_count)
			netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val);
	}
}

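/* Interrupt coalescing: time thresholds are converted between microseconds
 * (ethtool) and ENETC clock cycles (ICTT). The per-interrupt packet
 * thresholds are fixed at ENETC_TXIC_PKTTHR/ENETC_RXIC_PKTTHR and cannot be
 * changed; enabling adaptive Rx coalescing overrides the manual Rx time
 * threshold.
 */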
static int enetc_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ic,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_int_vector *v = priv->int_vector[0];

	ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt);
	ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt);

	ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR;
	ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR;

	ic->use_adaptive_rx_coalesce = priv->ic_mode & ENETC_IC_RX_ADAPTIVE;

	return 0;
}

static int enetc_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ic,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	u32 rx_ictt, tx_ictt;
	int i, ic_mode;
	bool changed;

	tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs);
	rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs);

	if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR)
		return -EOPNOTSUPP;

	if (ic->tx_max_coalesced_frames != ENETC_TXIC_PKTTHR)
		return -EOPNOTSUPP;

	ic_mode = ENETC_IC_NONE;
	if (ic->use_adaptive_rx_coalesce) {
		ic_mode |= ENETC_IC_RX_ADAPTIVE;
		rx_ictt = 0x1;
	} else {
		ic_mode |= rx_ictt ? ENETC_IC_RX_MANUAL : 0;
	}

	ic_mode |= tx_ictt ? ENETC_IC_TX_MANUAL : 0;

	/* commit the settings */
	changed = (ic_mode != priv->ic_mode) || (priv->tx_ictt != tx_ictt);

	priv->ic_mode = ic_mode;
	priv->tx_ictt = tx_ictt;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v = priv->int_vector[i];

		v->rx_ictt = rx_ictt;
		v->rx_dim_en = !!(ic_mode & ENETC_IC_RX_ADAPTIVE);
	}

	if (netif_running(ndev) && changed) {
		/* reconfigure the operation mode of h/w interrupts,
		 * traffic needs to be paused in the process
		 */
		enetc_stop(ndev);
		enetc_start(ndev);
	}

	return 0;
}

static int enetc_get_ts_info(struct net_device *ndev,
			     struct ethtool_ts_info *info)
{
	int *phc_idx;

	phc_idx = symbol_get(enetc_phc_index);
	if (phc_idx) {
		info->phc_index = *phc_idx;
		symbol_put(enetc_phc_index);
	} else {
		info->phc_index = -1;
	}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE |
				SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON) |
			 (1 << HWTSTAMP_TX_ONESTEP_SYNC);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
#else
	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
#endif
	return 0;
}

static void enetc_get_wol(struct net_device *dev,
			  struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;

	if (dev->phydev)
		phy_ethtool_get_wol(dev->phydev, wol);
}

static int enetc_set_wol(struct net_device *dev,
			 struct ethtool_wolinfo *wol)
{
	int ret;

	if (!dev->phydev)
		return -EOPNOTSUPP;

	ret = phy_ethtool_set_wol(dev->phydev, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, wol->wolopts);

	return ret;
}

static void enetc_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct enetc_ndev_priv *priv = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(priv->phylink, pause);
}

static int enetc_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct enetc_ndev_priv *priv = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(priv->phylink, pause);
}

static int enetc_get_link_ksettings(struct net_device *dev,
				    struct ethtool_link_ksettings *cmd)
{
	struct enetc_ndev_priv *priv = netdev_priv(dev);

	if (!priv->phylink)
		return -EOPNOTSUPP;

	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}

static int enetc_set_link_ksettings(struct net_device *dev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct enetc_ndev_priv *priv = netdev_priv(dev);

	if (!priv->phylink)
		return -EOPNOTSUPP;

	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}

static void enetc_get_mm_stats(struct net_device *ndev,
			       struct ethtool_mm_stats *s)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_si *si = priv->si;

	if (!(si->hw_features & ENETC_SI_F_QBU))
		return;

	s->MACMergeFrameAssErrorCount = enetc_port_rd(hw, ENETC_MMFAECR);
	s->MACMergeFrameSmdErrorCount = enetc_port_rd(hw, ENETC_MMFSECR);
	s->MACMergeFrameAssOkCount = enetc_port_rd(hw, ENETC_MMFAOCR);
	s->MACMergeFragCountRx = enetc_port_rd(hw, ENETC_MMFCRXR);
	s->MACMergeFragCountTx = enetc_port_rd(hw, ENETC_MMFCTXR);
	s->MACMergeHoldCount = enetc_port_rd(hw, ENETC_MMHCR);
}

static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_si *si = priv->si;
	struct enetc_hw *hw = &si->hw;
	u32 lafs, rafs, val;

	if (!(si->hw_features & ENETC_SI_F_QBU))
		return -EOPNOTSUPP;

	mutex_lock(&priv->mm_lock);

	val = enetc_port_rd(hw, ENETC_PFPMR);
	state->pmac_enabled = !!(val & ENETC_PFPMR_PMACE);

	val = enetc_port_rd(hw, ENETC_MMCSR);

	switch (ENETC_MMCSR_GET_VSTS(val)) {
	case 0:
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
		break;
	case 2:
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
		break;
	case 3:
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
		break;
	case 4:
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
		break;
	case 5:
	default:
		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
		break;
	}

	rafs = ENETC_MMCSR_GET_RAFS(val);
	state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(rafs);
	lafs = ENETC_MMCSR_GET_LAFS(val);
	state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs);
	state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */
	state->tx_active = !!(val & ENETC_MMCSR_LPA);
	state->verify_enabled = !(val & ENETC_MMCSR_VDIS);
	state->verify_time = ENETC_MMCSR_GET_VT(val);
	/* A verifyTime of 128 ms would exceed the 7 bit width
	 * of the ENETC_MMCSR_VT field
	 */
	state->max_verify_time = 127;

	mutex_unlock(&priv->mm_lock);

	return 0;
}

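/* Apply a MAC Merge / frame preemption configuration: toggle the pMAC and
 * the verification state machine, set the minimum Tx fragment size, and
 * record the requested preemption state in priv->active_offloads so that
 * enetc_mm_link_state_update() can re-enable merging on link up.
 */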
static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
			struct netlink_ext_ack *extack)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_si *si = priv->si;
	u32 val, add_frag_size;
	int err;

	if (!(si->hw_features & ENETC_SI_F_QBU))
		return -EOPNOTSUPP;

	err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
					      &add_frag_size, extack);
	if (err)
		return err;

	mutex_lock(&priv->mm_lock);

	val = enetc_port_rd(hw, ENETC_PFPMR);
	if (cfg->pmac_enabled)
		val |= ENETC_PFPMR_PMACE;
	else
		val &= ~ENETC_PFPMR_PMACE;
	enetc_port_wr(hw, ENETC_PFPMR, val);

	val = enetc_port_rd(hw, ENETC_MMCSR);

	if (cfg->verify_enabled)
		val &= ~ENETC_MMCSR_VDIS;
	else
		val |= ENETC_MMCSR_VDIS;

	if (cfg->tx_enabled)
		priv->active_offloads |= ENETC_F_QBU;
	else
		priv->active_offloads &= ~ENETC_F_QBU;

	/* If link is up, enable MAC Merge right away */
	if (!!(priv->active_offloads & ENETC_F_QBU) &&
	    !(val & ENETC_MMCSR_LINK_FAIL))
		val |= ENETC_MMCSR_ME;

	val &= ~ENETC_MMCSR_VT_MASK;
	val |= ENETC_MMCSR_VT(cfg->verify_time);

	val &= ~ENETC_MMCSR_RAFS_MASK;
	val |= ENETC_MMCSR_RAFS(add_frag_size);

	enetc_port_wr(hw, ENETC_MMCSR, val);

	mutex_unlock(&priv->mm_lock);

	return 0;
}

/* When the link is lost, the verification state machine goes to the FAILED
 * state and doesn't restart on its own after a new link up event.
 * According to 802.3 Figure 99-8 - Verify state diagram, the LINK_FAIL bit
 * should have been sufficient to re-trigger verification, but for ENETC it
 * doesn't. As a workaround, we need to toggle the Merge Enable bit to
 * re-trigger verification when link comes up.
 */
void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
{
	struct enetc_hw *hw = &priv->si->hw;
	u32 val;

	mutex_lock(&priv->mm_lock);

	val = enetc_port_rd(hw, ENETC_MMCSR);

	if (link) {
		val &= ~ENETC_MMCSR_LINK_FAIL;
		if (priv->active_offloads & ENETC_F_QBU)
			val |= ENETC_MMCSR_ME;
	} else {
		val |= ENETC_MMCSR_LINK_FAIL;
		if (priv->active_offloads & ENETC_F_QBU)
			val &= ~ENETC_MMCSR_ME;
	}

	enetc_port_wr(hw, ENETC_MMCSR, val);

	mutex_unlock(&priv->mm_lock);
}
EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);

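/* The VF ethtool_ops are a subset of the PF ones: MAC/RMON/pause statistics,
 * the RSS hash key, link and pause configuration, WoL and MAC Merge support
 * are only exposed on the PF.
 */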
static const struct ethtool_ops enetc_pf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_regs_len = enetc_get_reglen,
	.get_regs = enetc_get_regs,
	.get_sset_count = enetc_get_sset_count,
	.get_strings = enetc_get_strings,
	.get_ethtool_stats = enetc_get_ethtool_stats,
	.get_pause_stats = enetc_get_pause_stats,
	.get_rmon_stats = enetc_get_rmon_stats,
	.get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
	.get_eth_mac_stats = enetc_get_eth_mac_stats,
	.get_rxnfc = enetc_get_rxnfc,
	.set_rxnfc = enetc_set_rxnfc,
	.get_rxfh_key_size = enetc_get_rxfh_key_size,
	.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
	.get_rxfh = enetc_get_rxfh,
	.set_rxfh = enetc_set_rxfh,
	.get_ringparam = enetc_get_ringparam,
	.get_coalesce = enetc_get_coalesce,
	.set_coalesce = enetc_set_coalesce,
	.get_link_ksettings = enetc_get_link_ksettings,
	.set_link_ksettings = enetc_set_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_ts_info = enetc_get_ts_info,
	.get_wol = enetc_get_wol,
	.set_wol = enetc_set_wol,
	.get_pauseparam = enetc_get_pauseparam,
	.set_pauseparam = enetc_set_pauseparam,
	.get_mm = enetc_get_mm,
	.set_mm = enetc_set_mm,
	.get_mm_stats = enetc_get_mm_stats,
};

static const struct ethtool_ops enetc_vf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_regs_len = enetc_get_reglen,
	.get_regs = enetc_get_regs,
	.get_sset_count = enetc_get_sset_count,
	.get_strings = enetc_get_strings,
	.get_ethtool_stats = enetc_get_ethtool_stats,
	.get_rxnfc = enetc_get_rxnfc,
	.set_rxnfc = enetc_set_rxnfc,
	.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
	.get_rxfh = enetc_get_rxfh,
	.set_rxfh = enetc_set_rxfh,
	.get_ringparam = enetc_get_ringparam,
	.get_coalesce = enetc_get_coalesce,
	.set_coalesce = enetc_set_coalesce,
	.get_link = ethtool_op_get_link,
	.get_ts_info = enetc_get_ts_info,
};

void enetc_set_ethtool_ops(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	if (enetc_si_is_pf(priv->si))
		ndev->ethtool_ops = &enetc_pf_ethtool_ops;
	else
		ndev->ethtool_ops = &enetc_vf_ethtool_ops;
}
EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops);