// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				 u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

/* L2-interrupt masking/unmasking helpers that automatically save the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bit explicit here to save
 * one register write per packet on 32-bit platforms.
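 * dma_desc_set_addr() below therefore only writes the masked upper address
 * word (DESC_ADDR_HI_STATUS_LEN) when CONFIG_PHYS_ADDR_T_64BIT is set, and
 * always writes the lower 32 bits to DESC_ADDR_LO.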
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g. using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
	       RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125 MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
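	 * For example, a requested 100 usecs becomes
	 * DIV_ROUND_UP(100 * 1000, 8192) = 13 timeout units, i.e. roughly
	 * 106.5 usecs of effective delay.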
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
	    ec->use_adaptive_tx_coalesce)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT
	 * Lite groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
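		 * (i.e. skb_pull() removes sizeof(*rsb) + 2 bytes and len is
		 * reduced by the same amount before being used for the byte
		 * counters below)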
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample;
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								     dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
		priv->mib.tx_realloc_tsb++;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 len_status, addr_lo;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ports are latched, so write upper address first */
	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	size_t size;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
		  ring->size, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
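	 * For reference, a 1518-byte frame at 10 Mbps takes roughly 1.2 ms
	 * on the wire.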
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv) 1885 { 1886 intrl2_0_mask_set(priv, 0xffffffff); 1887 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1888 if (!priv->is_lite) { 1889 intrl2_1_mask_set(priv, 0xffffffff); 1890 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1891 } 1892 } 1893 1894 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) 1895 { 1896 u32 reg; 1897 1898 reg = gib_readl(priv, GIB_CONTROL); 1899 /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */ 1900 if (netdev_uses_dsa(priv->netdev)) { 1901 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); 1902 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; 1903 } 1904 reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT); 1905 reg |= 12 << GIB_IPG_LEN_SHIFT; 1906 gib_writel(priv, reg, GIB_CONTROL); 1907 } 1908 1909 static int bcm_sysport_open(struct net_device *dev) 1910 { 1911 struct bcm_sysport_priv *priv = netdev_priv(dev); 1912 struct phy_device *phydev; 1913 unsigned int i; 1914 int ret; 1915 1916 /* Reset UniMAC */ 1917 umac_reset(priv); 1918 1919 /* Flush TX and RX FIFOs at TOPCTRL level */ 1920 topctrl_flush(priv); 1921 1922 /* Disable the UniMAC RX/TX */ 1923 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); 1924 1925 /* Enable RBUF 2bytes alignment and Receive Status Block */ 1926 rbuf_init(priv); 1927 1928 /* Set maximum frame length */ 1929 if (!priv->is_lite) 1930 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 1931 else 1932 gib_set_pad_extension(priv); 1933 1934 /* Apply features again in case we changed them while interface was 1935 * down 1936 */ 1937 bcm_sysport_set_features(dev, dev->features); 1938 1939 /* Set MAC address */ 1940 umac_set_hw_addr(priv, dev->dev_addr); 1941 1942 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 1943 0, priv->phy_interface); 1944 if (!phydev) { 1945 netdev_err(dev, "could not attach to PHY\n"); 1946 return -ENODEV; 1947 } 1948 1949 /* Reset house keeping link status */ 1950 priv->old_duplex = -1; 1951 priv->old_link = -1; 1952 priv->old_pause = -1; 1953 1954 /* mask all interrupts and request them */ 1955 bcm_sysport_mask_all_intrs(priv); 1956 1957 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); 1958 if (ret) { 1959 netdev_err(dev, "failed to request RX interrupt\n"); 1960 goto out_phy_disconnect; 1961 } 1962 1963 if (!priv->is_lite) { 1964 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, 1965 dev->name, dev); 1966 if (ret) { 1967 netdev_err(dev, "failed to request TX interrupt\n"); 1968 goto out_free_irq0; 1969 } 1970 } 1971 1972 /* Initialize both hardware and software ring */ 1973 for (i = 0; i < dev->num_tx_queues; i++) { 1974 ret = bcm_sysport_init_tx_ring(priv, i); 1975 if (ret) { 1976 netdev_err(dev, "failed to initialize TX ring %d\n", 1977 i); 1978 goto out_free_tx_ring; 1979 } 1980 } 1981 1982 /* Initialize linked-list */ 1983 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); 1984 1985 /* Initialize RX ring */ 1986 ret = bcm_sysport_init_rx_ring(priv); 1987 if (ret) { 1988 netdev_err(dev, "failed to initialize RX ring\n"); 1989 goto out_free_rx_ring; 1990 } 1991 1992 /* Turn on RDMA */ 1993 ret = rdma_enable_set(priv, 1); 1994 if (ret) 1995 goto out_free_rx_ring; 1996 1997 /* Turn on TDMA */ 1998 ret = tdma_enable_set(priv, 1); 1999 if (ret) 2000 goto out_clear_rx_int; 2001 2002 /* Turn on UniMAC TX/RX */ 2003 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1); 2004 2005 bcm_sysport_netif_start(dev); 2006 2007 netif_tx_start_all_queues(dev); 2008 2009 
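/* Note: the error labels below fall through, so a jump to any of them also runs every later teardown step */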
return 0; 2010 2011 out_clear_rx_int: 2012 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); 2013 out_free_rx_ring: 2014 bcm_sysport_fini_rx_ring(priv); 2015 out_free_tx_ring: 2016 for (i = 0; i < dev->num_tx_queues; i++) 2017 bcm_sysport_fini_tx_ring(priv, i); 2018 if (!priv->is_lite) 2019 free_irq(priv->irq1, dev); 2020 out_free_irq0: 2021 free_irq(priv->irq0, dev); 2022 out_phy_disconnect: 2023 phy_disconnect(phydev); 2024 return ret; 2025 } 2026 2027 static void bcm_sysport_netif_stop(struct net_device *dev) 2028 { 2029 struct bcm_sysport_priv *priv = netdev_priv(dev); 2030 2031 /* stop all software from updating hardware */ 2032 netif_tx_disable(dev); 2033 napi_disable(&priv->napi); 2034 cancel_work_sync(&priv->dim.dim.work); 2035 phy_stop(dev->phydev); 2036 2037 /* mask all interrupts */ 2038 bcm_sysport_mask_all_intrs(priv); 2039 } 2040 2041 static int bcm_sysport_stop(struct net_device *dev) 2042 { 2043 struct bcm_sysport_priv *priv = netdev_priv(dev); 2044 unsigned int i; 2045 int ret; 2046 2047 bcm_sysport_netif_stop(dev); 2048 2049 /* Disable UniMAC RX */ 2050 umac_enable_set(priv, CMD_RX_EN, 0); 2051 2052 ret = tdma_enable_set(priv, 0); 2053 if (ret) { 2054 netdev_err(dev, "timeout disabling TDMA\n"); 2055 return ret; 2056 } 2057 2058 /* Wait for a maximum packet size to be drained */ 2059 usleep_range(2000, 3000); 2060 2061 ret = rdma_enable_set(priv, 0); 2062 if (ret) { 2063 netdev_err(dev, "timeout disabling RDMA\n"); 2064 return ret; 2065 } 2066 2067 /* Disable UniMAC TX */ 2068 umac_enable_set(priv, CMD_TX_EN, 0); 2069 2070 /* Free RX/TX rings SW structures */ 2071 for (i = 0; i < dev->num_tx_queues; i++) 2072 bcm_sysport_fini_tx_ring(priv, i); 2073 bcm_sysport_fini_rx_ring(priv); 2074 2075 free_irq(priv->irq0, dev); 2076 if (!priv->is_lite) 2077 free_irq(priv->irq1, dev); 2078 2079 /* Disconnect from PHY */ 2080 phy_disconnect(dev->phydev); 2081 2082 return 0; 2083 } 2084 2085 static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv, 2086 u64 location) 2087 { 2088 unsigned int index; 2089 u32 reg; 2090 2091 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { 2092 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index)); 2093 reg >>= RXCHK_BRCM_TAG_CID_SHIFT; 2094 reg &= RXCHK_BRCM_TAG_CID_MASK; 2095 if (reg == location) 2096 return index; 2097 } 2098 2099 return -EINVAL; 2100 } 2101 2102 static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv, 2103 struct ethtool_rxnfc *nfc) 2104 { 2105 int index; 2106 2107 /* This is not a rule that we know about */ 2108 index = bcm_sysport_rule_find(priv, nfc->fs.location); 2109 if (index < 0) 2110 return -EOPNOTSUPP; 2111 2112 nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE; 2113 2114 return 0; 2115 } 2116 2117 static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv, 2118 struct ethtool_rxnfc *nfc) 2119 { 2120 unsigned int index; 2121 u32 reg; 2122 2123 /* We cannot match locations greater than what the classification ID 2124 * permits (256 entries) 2125 */ 2126 if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK) 2127 return -E2BIG; 2128 2129 /* We cannot support flows that are not destined for a wake-up */ 2130 if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE) 2131 return -EOPNOTSUPP; 2132 2133 /* All filters are already in use, we cannot match more rules */ 2134 if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) == 2135 RXCHK_BRCM_TAG_MAX) 2136 return -ENOSPC; 2137 2138 index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX); 2139 if (index > RXCHK_BRCM_TAG_MAX) 2140 return -ENOSPC; 2141 2142 /* Location 
is the classification ID, and index is the position 2143 * within one of our 8 possible filters to be programmed 2144 */ 2145 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index)); 2146 reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT); 2147 reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT; 2148 rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index)); 2149 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); 2150 2151 priv->filters_loc[index] = nfc->fs.location; 2152 set_bit(index, priv->filters); 2153 2154 return 0; 2155 } 2156 2157 static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv, 2158 u64 location) 2159 { 2160 int index; 2161 2162 /* This is not a rule that we know about */ 2163 index = bcm_sysport_rule_find(priv, location); 2164 if (index < 0) 2165 return -EOPNOTSUPP; 2166 2167 /* No need to disable this filter if it was enabled, this will 2168 * be taken care of during suspend time by bcm_sysport_suspend_to_wol 2169 */ 2170 clear_bit(index, priv->filters); 2171 priv->filters_loc[index] = 0; 2172 2173 return 0; 2174 } 2175 2176 static int bcm_sysport_get_rxnfc(struct net_device *dev, 2177 struct ethtool_rxnfc *nfc, u32 *rule_locs) 2178 { 2179 struct bcm_sysport_priv *priv = netdev_priv(dev); 2180 int ret = -EOPNOTSUPP; 2181 2182 switch (nfc->cmd) { 2183 case ETHTOOL_GRXCLSRULE: 2184 ret = bcm_sysport_rule_get(priv, nfc); 2185 break; 2186 default: 2187 break; 2188 } 2189 2190 return ret; 2191 } 2192 2193 static int bcm_sysport_set_rxnfc(struct net_device *dev, 2194 struct ethtool_rxnfc *nfc) 2195 { 2196 struct bcm_sysport_priv *priv = netdev_priv(dev); 2197 int ret = -EOPNOTSUPP; 2198 2199 switch (nfc->cmd) { 2200 case ETHTOOL_SRXCLSRLINS: 2201 ret = bcm_sysport_rule_set(priv, nfc); 2202 break; 2203 case ETHTOOL_SRXCLSRLDEL: 2204 ret = bcm_sysport_rule_del(priv, nfc->fs.location); 2205 break; 2206 default: 2207 break; 2208 } 2209 2210 return ret; 2211 } 2212 2213 static const struct ethtool_ops bcm_sysport_ethtool_ops = { 2214 .get_drvinfo = bcm_sysport_get_drvinfo, 2215 .get_msglevel = bcm_sysport_get_msglvl, 2216 .set_msglevel = bcm_sysport_set_msglvl, 2217 .get_link = ethtool_op_get_link, 2218 .get_strings = bcm_sysport_get_strings, 2219 .get_ethtool_stats = bcm_sysport_get_stats, 2220 .get_sset_count = bcm_sysport_get_sset_count, 2221 .get_wol = bcm_sysport_get_wol, 2222 .set_wol = bcm_sysport_set_wol, 2223 .get_coalesce = bcm_sysport_get_coalesce, 2224 .set_coalesce = bcm_sysport_set_coalesce, 2225 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2226 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2227 .get_rxnfc = bcm_sysport_get_rxnfc, 2228 .set_rxnfc = bcm_sysport_set_rxnfc, 2229 }; 2230 2231 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, 2232 struct net_device *sb_dev) 2233 { 2234 struct bcm_sysport_priv *priv = netdev_priv(dev); 2235 u16 queue = skb_get_queue_mapping(skb); 2236 struct bcm_sysport_tx_ring *tx_ring; 2237 unsigned int q, port; 2238 2239 if (!netdev_uses_dsa(dev)) 2240 return netdev_pick_tx(dev, skb, NULL); 2241 2242 /* DSA tagging layer will have configured the correct queue */ 2243 q = BRCM_TAG_GET_QUEUE(queue); 2244 port = BRCM_TAG_GET_PORT(queue); 2245 tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; 2246 2247 if (unlikely(!tx_ring)) 2248 return netdev_pick_tx(dev, skb, NULL); 2249 2250 return tx_ring->index; 2251 } 2252 2253 static const struct net_device_ops bcm_sysport_netdev_ops = { 2254 .ndo_start_xmit = bcm_sysport_xmit, 2255 .ndo_tx_timeout = bcm_sysport_tx_timeout, 2256 
.ndo_open = bcm_sysport_open, 2257 .ndo_stop = bcm_sysport_stop, 2258 .ndo_set_features = bcm_sysport_set_features, 2259 .ndo_set_rx_mode = bcm_sysport_set_rx_mode, 2260 .ndo_set_mac_address = bcm_sysport_change_mac, 2261 #ifdef CONFIG_NET_POLL_CONTROLLER 2262 .ndo_poll_controller = bcm_sysport_poll_controller, 2263 #endif 2264 .ndo_get_stats64 = bcm_sysport_get_stats64, 2265 .ndo_select_queue = bcm_sysport_select_queue, 2266 }; 2267 2268 static int bcm_sysport_map_queues(struct notifier_block *nb, 2269 struct dsa_notifier_register_info *info) 2270 { 2271 struct bcm_sysport_tx_ring *ring; 2272 struct bcm_sysport_priv *priv; 2273 struct net_device *slave_dev; 2274 unsigned int num_tx_queues; 2275 unsigned int q, qp, port; 2276 struct net_device *dev; 2277 2278 priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); 2279 if (priv->netdev != info->master) 2280 return 0; 2281 2282 dev = info->master; 2283 2284 /* We can't set up queue inspection for switches that are not 2285 * directly attached 2286 */ 2287 if (info->switch_number) 2288 return 0; 2289 2290 if (dev->netdev_ops != &bcm_sysport_netdev_ops) 2291 return 0; 2292 2293 port = info->port_number; 2294 slave_dev = info->info.dev; 2295 2296 /* SYSTEMPORT Lite has half as many queues, so we cannot do a 2297 * 1:1 mapping and can only do a 2:1 mapping. By reducing the number of 2298 * queues on the per-port (slave_dev) network devices, we achieve just that. 2299 * This needs to happen now, before any slave network device is used, so that 2300 * it accurately reflects the number of real TX queues. 2301 */ 2302 if (priv->is_lite) 2303 netif_set_real_num_tx_queues(slave_dev, 2304 slave_dev->num_tx_queues / 2); 2305 2306 num_tx_queues = slave_dev->real_num_tx_queues; 2307 2308 if (priv->per_port_num_tx_queues && 2309 priv->per_port_num_tx_queues != num_tx_queues) 2310 netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); 2311 2312 priv->per_port_num_tx_queues = num_tx_queues; 2313 2314 for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues; 2315 q++) { 2316 ring = &priv->tx_rings[q]; 2317 2318 if (ring->inspect) 2319 continue; 2320 2321 /* Just remember the mapping; the actual programming is done 2322 * during bcm_sysport_init_tx_ring 2323 */ 2324 ring->switch_queue = qp; 2325 ring->switch_port = port; 2326 ring->inspect = true; 2327 priv->ring_map[q + port * num_tx_queues] = ring; 2328 qp++; 2329 } 2330 2331 return 0; 2332 } 2333 2334 static int bcm_sysport_unmap_queues(struct notifier_block *nb, 2335 struct dsa_notifier_register_info *info) 2336 { 2337 struct bcm_sysport_tx_ring *ring; 2338 struct bcm_sysport_priv *priv; 2339 struct net_device *slave_dev; 2340 unsigned int num_tx_queues; 2341 struct net_device *dev; 2342 unsigned int q, port; 2343 2344 priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); 2345 if (priv->netdev != info->master) 2346 return 0; 2347 2348 dev = info->master; 2349 2350 if (dev->netdev_ops != &bcm_sysport_netdev_ops) 2351 return 0; 2352 2353 port = info->port_number; 2354 slave_dev = info->info.dev; 2355 2356 num_tx_queues = slave_dev->real_num_tx_queues; 2357 2358 for (q = 0; q < dev->num_tx_queues; q++) { 2359 ring = &priv->tx_rings[q]; 2360 2361 if (ring->switch_port != port) 2362 continue; 2363 2364 if (!ring->inspect) 2365 continue; 2366 2367 ring->inspect = false; 2368 priv->ring_map[q + port * num_tx_queues] = NULL; 2369 } 2370 2371 return 0; 2372 } 2373 2374 static int bcm_sysport_dsa_notifier(struct notifier_block *nb, 2375 unsigned long event, void *ptr) 2376 { 2377 int ret = 
NOTIFY_DONE; 2378 2379 switch (event) { 2380 case DSA_PORT_REGISTER: 2381 ret = bcm_sysport_map_queues(nb, ptr); 2382 break; 2383 case DSA_PORT_UNREGISTER: 2384 ret = bcm_sysport_unmap_queues(nb, ptr); 2385 break; 2386 } 2387 2388 return notifier_from_errno(ret); 2389 } 2390 2391 #define REV_FMT "v%2x.%02x" 2392 2393 static const struct bcm_sysport_hw_params bcm_sysport_params[] = { 2394 [SYSTEMPORT] = { 2395 .is_lite = false, 2396 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS, 2397 }, 2398 [SYSTEMPORT_LITE] = { 2399 .is_lite = true, 2400 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS, 2401 }, 2402 }; 2403 2404 static const struct of_device_id bcm_sysport_of_match[] = { 2405 { .compatible = "brcm,systemportlite-v1.00", 2406 .data = &bcm_sysport_params[SYSTEMPORT_LITE] }, 2407 { .compatible = "brcm,systemport-v1.00", 2408 .data = &bcm_sysport_params[SYSTEMPORT] }, 2409 { .compatible = "brcm,systemport", 2410 .data = &bcm_sysport_params[SYSTEMPORT] }, 2411 { /* sentinel */ } 2412 }; 2413 MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); 2414 2415 static int bcm_sysport_probe(struct platform_device *pdev) 2416 { 2417 const struct bcm_sysport_hw_params *params; 2418 const struct of_device_id *of_id = NULL; 2419 struct bcm_sysport_priv *priv; 2420 struct device_node *dn; 2421 struct net_device *dev; 2422 const void *macaddr; 2423 struct resource *r; 2424 u32 txq, rxq; 2425 int ret; 2426 2427 dn = pdev->dev.of_node; 2428 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2429 of_id = of_match_node(bcm_sysport_of_match, dn); 2430 if (!of_id || !of_id->data) 2431 return -EINVAL; 2432 2433 /* Fairly quickly we need to know the type of adapter we have */ 2434 params = of_id->data; 2435 2436 /* Read the Transmit/Receive Queue properties */ 2437 if (of_property_read_u32(dn, "systemport,num-txq", &txq)) 2438 txq = TDMA_NUM_RINGS; 2439 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) 2440 rxq = 1; 2441 2442 /* Sanity check the number of transmit queues */ 2443 if (!txq || txq > TDMA_NUM_RINGS) 2444 return -EINVAL; 2445 2446 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); 2447 if (!dev) 2448 return -ENOMEM; 2449 2450 /* Initialize private members */ 2451 priv = netdev_priv(dev); 2452 2453 /* Allocate number of TX rings */ 2454 priv->tx_rings = devm_kcalloc(&pdev->dev, txq, 2455 sizeof(struct bcm_sysport_tx_ring), 2456 GFP_KERNEL); 2457 if (!priv->tx_rings) 2458 return -ENOMEM; 2459 2460 priv->is_lite = params->is_lite; 2461 priv->num_rx_desc_words = params->num_rx_desc_words; 2462 2463 priv->irq0 = platform_get_irq(pdev, 0); 2464 if (!priv->is_lite) { 2465 priv->irq1 = platform_get_irq(pdev, 1); 2466 priv->wol_irq = platform_get_irq(pdev, 2); 2467 } else { 2468 priv->wol_irq = platform_get_irq(pdev, 1); 2469 } 2470 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { 2471 dev_err(&pdev->dev, "invalid interrupts\n"); 2472 ret = -EINVAL; 2473 goto err_free_netdev; 2474 } 2475 2476 priv->base = devm_ioremap_resource(&pdev->dev, r); 2477 if (IS_ERR(priv->base)) { 2478 ret = PTR_ERR(priv->base); 2479 goto err_free_netdev; 2480 } 2481 2482 priv->netdev = dev; 2483 priv->pdev = pdev; 2484 2485 priv->phy_interface = of_get_phy_mode(dn); 2486 /* Default to GMII interface mode */ 2487 if (priv->phy_interface < 0) 2488 priv->phy_interface = PHY_INTERFACE_MODE_GMII; 2489 2490 /* In the case of a fixed PHY, the DT node associated 2491 * to the PHY is the Ethernet MAC DT node. 
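* (of_phy_is_fixed_link() checks for a "fixed-link" child node, or the legacy fixed-link property, on this same node.)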
2492 */ 2493 if (of_phy_is_fixed_link(dn)) { 2494 ret = of_phy_register_fixed_link(dn); 2495 if (ret) { 2496 dev_err(&pdev->dev, "failed to register fixed PHY\n"); 2497 goto err_free_netdev; 2498 } 2499 2500 priv->phy_dn = dn; 2501 } 2502 2503 /* Initialize netdevice members */ 2504 macaddr = of_get_mac_address(dn); 2505 if (IS_ERR(macaddr)) { 2506 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); 2507 eth_hw_addr_random(dev); 2508 } else { 2509 ether_addr_copy(dev->dev_addr, macaddr); 2510 } 2511 2512 SET_NETDEV_DEV(dev, &pdev->dev); 2513 dev_set_drvdata(&pdev->dev, dev); 2514 dev->ethtool_ops = &bcm_sysport_ethtool_ops; 2515 dev->netdev_ops = &bcm_sysport_netdev_ops; 2516 netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64); 2517 2518 dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | 2519 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2520 dev->hw_features |= dev->features; 2521 dev->vlan_features |= dev->features; 2522 2523 /* Request the WOL interrupt and advertise suspend if available */ 2524 priv->wol_irq_disabled = 1; 2525 ret = devm_request_irq(&pdev->dev, priv->wol_irq, 2526 bcm_sysport_wol_isr, 0, dev->name, priv); 2527 if (!ret) 2528 device_set_wakeup_capable(&pdev->dev, 1); 2529 2530 /* Set the needed headroom once and for all */ 2531 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); 2532 dev->needed_headroom += sizeof(struct bcm_tsb); 2533 2534 /* libphy will adjust the link state accordingly */ 2535 netif_carrier_off(dev); 2536 2537 priv->rx_max_coalesced_frames = 1; 2538 u64_stats_init(&priv->syncp); 2539 2540 priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier; 2541 2542 ret = register_dsa_notifier(&priv->dsa_notifier); 2543 if (ret) { 2544 dev_err(&pdev->dev, "failed to register DSA notifier\n"); 2545 goto err_deregister_fixed_link; 2546 } 2547 2548 ret = register_netdev(dev); 2549 if (ret) { 2550 dev_err(&pdev->dev, "failed to register net_device\n"); 2551 goto err_deregister_notifier; 2552 } 2553 2554 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; 2555 dev_info(&pdev->dev, 2556 "Broadcom SYSTEMPORT%s " REV_FMT 2557 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", 2558 priv->is_lite ? 
" Lite" : "", 2559 (priv->rev >> 8) & 0xff, priv->rev & 0xff, 2560 priv->irq0, priv->irq1, txq, rxq); 2561 2562 return 0; 2563 2564 err_deregister_notifier: 2565 unregister_dsa_notifier(&priv->dsa_notifier); 2566 err_deregister_fixed_link: 2567 if (of_phy_is_fixed_link(dn)) 2568 of_phy_deregister_fixed_link(dn); 2569 err_free_netdev: 2570 free_netdev(dev); 2571 return ret; 2572 } 2573 2574 static int bcm_sysport_remove(struct platform_device *pdev) 2575 { 2576 struct net_device *dev = dev_get_drvdata(&pdev->dev); 2577 struct bcm_sysport_priv *priv = netdev_priv(dev); 2578 struct device_node *dn = pdev->dev.of_node; 2579 2580 /* Not much to do, ndo_close has been called 2581 * and we use managed allocations 2582 */ 2583 unregister_dsa_notifier(&priv->dsa_notifier); 2584 unregister_netdev(dev); 2585 if (of_phy_is_fixed_link(dn)) 2586 of_phy_deregister_fixed_link(dn); 2587 free_netdev(dev); 2588 dev_set_drvdata(&pdev->dev, NULL); 2589 2590 return 0; 2591 } 2592 2593 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) 2594 { 2595 struct net_device *ndev = priv->netdev; 2596 unsigned int timeout = 1000; 2597 unsigned int index, i = 0; 2598 u32 reg; 2599 2600 reg = umac_readl(priv, UMAC_MPD_CTRL); 2601 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) 2602 reg |= MPD_EN; 2603 reg &= ~PSW_EN; 2604 if (priv->wolopts & WAKE_MAGICSECURE) { 2605 /* Program the SecureOn password */ 2606 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), 2607 UMAC_PSW_MS); 2608 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), 2609 UMAC_PSW_LS); 2610 reg |= PSW_EN; 2611 } 2612 umac_writel(priv, reg, UMAC_MPD_CTRL); 2613 2614 if (priv->wolopts & WAKE_FILTER) { 2615 /* Turn on ACPI matching to steal packets from RBUF */ 2616 reg = rbuf_readl(priv, RBUF_CONTROL); 2617 if (priv->is_lite) 2618 reg |= RBUF_ACPI_EN_LITE; 2619 else 2620 reg |= RBUF_ACPI_EN; 2621 rbuf_writel(priv, reg, RBUF_CONTROL); 2622 2623 /* Enable RXCHK, active filters and Broadcom tag matching */ 2624 reg = rxchk_readl(priv, RXCHK_CONTROL); 2625 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << 2626 RXCHK_BRCM_TAG_MATCH_SHIFT); 2627 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { 2628 reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i); 2629 i++; 2630 } 2631 reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN; 2632 rxchk_writel(priv, reg, RXCHK_CONTROL); 2633 } 2634 2635 /* Make sure RBUF entered WoL mode as result */ 2636 do { 2637 reg = rbuf_readl(priv, RBUF_STATUS); 2638 if (reg & RBUF_WOL_MODE) 2639 break; 2640 2641 udelay(10); 2642 } while (timeout-- > 0); 2643 2644 /* Do not leave the UniMAC RBUF matching only MPD packets */ 2645 if (!timeout) { 2646 mpd_enable_set(priv, false); 2647 netif_err(priv, wol, ndev, "failed to enter WOL mode\n"); 2648 return -ETIMEDOUT; 2649 } 2650 2651 /* UniMAC receive needs to be turned on */ 2652 umac_enable_set(priv, CMD_RX_EN, 1); 2653 2654 netif_dbg(priv, wol, ndev, "entered WOL mode\n"); 2655 2656 return 0; 2657 } 2658 2659 static int __maybe_unused bcm_sysport_suspend(struct device *d) 2660 { 2661 struct net_device *dev = dev_get_drvdata(d); 2662 struct bcm_sysport_priv *priv = netdev_priv(dev); 2663 unsigned int i; 2664 int ret = 0; 2665 u32 reg; 2666 2667 if (!netif_running(dev)) 2668 return 0; 2669 2670 netif_device_detach(dev); 2671 2672 bcm_sysport_netif_stop(dev); 2673 2674 phy_suspend(dev->phydev); 2675 2676 /* Disable UniMAC RX */ 2677 umac_enable_set(priv, CMD_RX_EN, 0); 2678 2679 ret = rdma_enable_set(priv, 0); 2680 if (ret) { 2681 netdev_err(dev, "RDMA timeout!\n"); 2682 return ret; 2683 } 2684 
2685 /* Disable RXCHK if enabled */ 2686 if (priv->rx_chk_en) { 2687 reg = rxchk_readl(priv, RXCHK_CONTROL); 2688 reg &= ~RXCHK_EN; 2689 rxchk_writel(priv, reg, RXCHK_CONTROL); 2690 } 2691 2692 /* Flush RX pipe */ 2693 if (!priv->wolopts) 2694 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); 2695 2696 ret = tdma_enable_set(priv, 0); 2697 if (ret) { 2698 netdev_err(dev, "TDMA timeout!\n"); 2699 return ret; 2700 } 2701 2702 /* Wait for a packet boundary */ 2703 usleep_range(2000, 3000); 2704 2705 umac_enable_set(priv, CMD_TX_EN, 0); 2706 2707 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); 2708 2709 /* Free RX/TX rings SW structures */ 2710 for (i = 0; i < dev->num_tx_queues; i++) 2711 bcm_sysport_fini_tx_ring(priv, i); 2712 bcm_sysport_fini_rx_ring(priv); 2713 2714 /* Get prepared for Wake-on-LAN */ 2715 if (device_may_wakeup(d) && priv->wolopts) 2716 ret = bcm_sysport_suspend_to_wol(priv); 2717 2718 return ret; 2719 } 2720 2721 static int __maybe_unused bcm_sysport_resume(struct device *d) 2722 { 2723 struct net_device *dev = dev_get_drvdata(d); 2724 struct bcm_sysport_priv *priv = netdev_priv(dev); 2725 unsigned int i; 2726 int ret; 2727 2728 if (!netif_running(dev)) 2729 return 0; 2730 2731 umac_reset(priv); 2732 2733 /* We may have been suspended and never received a WOL event that 2734 * would turn off MPD detection, take care of that now 2735 */ 2736 bcm_sysport_resume_from_wol(priv); 2737 2738 /* Initialize both hardware and software ring */ 2739 for (i = 0; i < dev->num_tx_queues; i++) { 2740 ret = bcm_sysport_init_tx_ring(priv, i); 2741 if (ret) { 2742 netdev_err(dev, "failed to initialize TX ring %d\n", 2743 i); 2744 goto out_free_tx_rings; 2745 } 2746 } 2747 2748 /* Initialize linked-list */ 2749 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); 2750 2751 /* Initialize RX ring */ 2752 ret = bcm_sysport_init_rx_ring(priv); 2753 if (ret) { 2754 netdev_err(dev, "failed to initialize RX ring\n"); 2755 goto out_free_rx_ring; 2756 } 2757 2758 /* RX pipe enable */ 2759 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 2760 2761 ret = rdma_enable_set(priv, 1); 2762 if (ret) { 2763 netdev_err(dev, "failed to enable RDMA\n"); 2764 goto out_free_rx_ring; 2765 } 2766 2767 /* Restore enabled features */ 2768 bcm_sysport_set_features(dev, dev->features); 2769 2770 rbuf_init(priv); 2771 2772 /* Set maximum frame length */ 2773 if (!priv->is_lite) 2774 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 2775 else 2776 gib_set_pad_extension(priv); 2777 2778 /* Set MAC address */ 2779 umac_set_hw_addr(priv, dev->dev_addr); 2780 2781 umac_enable_set(priv, CMD_RX_EN, 1); 2782 2783 /* TX pipe enable */ 2784 topctrl_writel(priv, 0, TX_FLUSH_CNTL); 2785 2786 umac_enable_set(priv, CMD_TX_EN, 1); 2787 2788 ret = tdma_enable_set(priv, 1); 2789 if (ret) { 2790 netdev_err(dev, "TDMA timeout!\n"); 2791 goto out_free_rx_ring; 2792 } 2793 2794 phy_resume(dev->phydev); 2795 2796 bcm_sysport_netif_start(dev); 2797 2798 netif_device_attach(dev); 2799 2800 return 0; 2801 2802 out_free_rx_ring: 2803 bcm_sysport_fini_rx_ring(priv); 2804 out_free_tx_rings: 2805 for (i = 0; i < dev->num_tx_queues; i++) 2806 bcm_sysport_fini_tx_ring(priv, i); 2807 return ret; 2808 } 2809 2810 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, 2811 bcm_sysport_suspend, bcm_sysport_resume); 2812 2813 static struct platform_driver bcm_sysport_driver = { 2814 .probe = bcm_sysport_probe, 2815 .remove = bcm_sysport_remove, 2816 .driver = { 2817 .name = "brcm-systemport", 2818 .of_match_table = bcm_sysport_of_match, 2819 .pm = &bcm_sysport_pm_ops, 
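/* SIMPLE_DEV_PM_OPS only wires up suspend/resume when CONFIG_PM_SLEEP is enabled, hence the __maybe_unused annotations on the callbacks above */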
2820 }, 2821 }; 2822 module_platform_driver(bcm_sysport_driver); 2823 2824 MODULE_AUTHOR("Broadcom Corporation"); 2825 MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver"); 2826 MODULE_ALIAS("platform:brcm-systemport"); 2827 MODULE_LICENSE("GPL"); 2828