1 /* 2 * Broadcom BCM7xxx System Port Ethernet MAC driver 3 * 4 * Copyright (C) 2014 Broadcom Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/module.h> 16 #include <linux/kernel.h> 17 #include <linux/netdevice.h> 18 #include <linux/etherdevice.h> 19 #include <linux/platform_device.h> 20 #include <linux/of.h> 21 #include <linux/of_net.h> 22 #include <linux/of_mdio.h> 23 #include <linux/phy.h> 24 #include <linux/phy_fixed.h> 25 #include <net/dsa.h> 26 #include <net/ip.h> 27 #include <net/ipv6.h> 28 29 #include "bcmsysport.h" 30 31 /* I/O accessors register helpers */ 32 #define BCM_SYSPORT_IO_MACRO(name, offset) \ 33 static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \ 34 { \ 35 u32 reg = readl_relaxed(priv->base + offset + off); \ 36 return reg; \ 37 } \ 38 static inline void name##_writel(struct bcm_sysport_priv *priv, \ 39 u32 val, u32 off) \ 40 { \ 41 writel_relaxed(val, priv->base + offset + off); \ 42 } \ 43 44 BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); 45 BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET); 46 BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET); 47 BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET); 48 BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET); 49 BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET); 50 BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET); 51 BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET); 52 BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET); 53 BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); 54 55 /* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact 56 * same layout, except it has been moved by 4 bytes up, *sigh* 57 */ 58 static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off) 59 { 60 if (priv->is_lite && off >= RDMA_STATUS) 61 off += 4; 62 return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off); 63 } 64 65 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off) 66 { 67 if (priv->is_lite && off >= RDMA_STATUS) 68 off += 4; 69 writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off); 70 } 71 72 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit) 73 { 74 if (!priv->is_lite) { 75 return BIT(bit); 76 } else { 77 if (bit >= ACB_ALGO) 78 return BIT(bit + 1); 79 else 80 return BIT(bit); 81 } 82 } 83 84 /* L2-interrupt masking/unmasking helpers, does automatic saving of the applied 85 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. 86 */ 87 #define BCM_SYSPORT_INTR_L2(which) \ 88 static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \ 89 u32 mask) \ 90 { \ 91 priv->irq##which##_mask &= ~(mask); \ 92 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ 93 } \ 94 static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \ 95 u32 mask) \ 96 { \ 97 intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \ 98 priv->irq##which##_mask |= (mask); \ 99 } \ 100 101 BCM_SYSPORT_INTR_L2(0) 102 BCM_SYSPORT_INTR_L2(1) 103 104 /* Register accesses to GISB/RBUS registers are expensive (few hundred 105 * nanoseconds), so keep the check for 64-bits explicit here to save 106 * one register write per-packet on 32-bits platforms. 
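 *
 * As a rough sketch of what that means for dma_desc_set_addr() below:
 * a 32-bit phys_addr_t build issues a single
 *
 *	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
 *
 * while a CONFIG_PHYS_ADDR_T_64BIT build first latches the upper word:
 *
 *	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
 *		       d + DESC_ADDR_HI_STATUS_LEN);
 *	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);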
107 */ 108 static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv, 109 void __iomem *d, 110 dma_addr_t addr) 111 { 112 #ifdef CONFIG_PHYS_ADDR_T_64BIT 113 writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK, 114 d + DESC_ADDR_HI_STATUS_LEN); 115 #endif 116 writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO); 117 } 118 119 static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, 120 struct dma_desc *desc, 121 unsigned int port) 122 { 123 /* Ports are latched, so write upper address first */ 124 tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); 125 tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port)); 126 } 127 128 /* Ethtool operations */ 129 static void bcm_sysport_set_rx_csum(struct net_device *dev, 130 netdev_features_t wanted) 131 { 132 struct bcm_sysport_priv *priv = netdev_priv(dev); 133 u32 reg; 134 135 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); 136 reg = rxchk_readl(priv, RXCHK_CONTROL); 137 /* Clear L2 header checks, which would prevent BPDUs 138 * from being received. 139 */ 140 reg &= ~RXCHK_L2_HDR_DIS; 141 if (priv->rx_chk_en) 142 reg |= RXCHK_EN; 143 else 144 reg &= ~RXCHK_EN; 145 146 /* If UniMAC forwards CRC, we need to skip over it to get 147 * a valid CHK bit to be set in the per-packet status word 148 */ 149 if (priv->rx_chk_en && priv->crc_fwd) 150 reg |= RXCHK_SKIP_FCS; 151 else 152 reg &= ~RXCHK_SKIP_FCS; 153 154 /* If Broadcom tags are enabled (e.g: using a switch), make 155 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom 156 * tag after the Ethernet MAC Source Address. 157 */ 158 if (netdev_uses_dsa(dev)) 159 reg |= RXCHK_BRCM_TAG_EN; 160 else 161 reg &= ~RXCHK_BRCM_TAG_EN; 162 163 rxchk_writel(priv, reg, RXCHK_CONTROL); 164 } 165 166 static void bcm_sysport_set_tx_csum(struct net_device *dev, 167 netdev_features_t wanted) 168 { 169 struct bcm_sysport_priv *priv = netdev_priv(dev); 170 u32 reg; 171 172 /* Hardware transmit checksum requires us to enable the Transmit status 173 * block prepended to the packet contents 174 */ 175 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 176 reg = tdma_readl(priv, TDMA_CONTROL); 177 if (priv->tsb_en) 178 reg |= tdma_control_bit(priv, TSB_EN); 179 else 180 reg &= ~tdma_control_bit(priv, TSB_EN); 181 tdma_writel(priv, reg, TDMA_CONTROL); 182 } 183 184 static int bcm_sysport_set_features(struct net_device *dev, 185 netdev_features_t features) 186 { 187 struct bcm_sysport_priv *priv = netdev_priv(dev); 188 189 /* Read CRC forward */ 190 if (!priv->is_lite) 191 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); 192 else 193 priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & 194 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); 195 196 bcm_sysport_set_rx_csum(dev, features); 197 bcm_sysport_set_tx_csum(dev, features); 198 199 return 0; 200 } 201 202 /* Hardware counters must be kept in sync because the order/offset 203 * is important here (order in structure declaration = order in hardware) 204 */ 205 static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { 206 /* general stats */ 207 STAT_NETDEV64(rx_packets), 208 STAT_NETDEV64(tx_packets), 209 STAT_NETDEV64(rx_bytes), 210 STAT_NETDEV64(tx_bytes), 211 STAT_NETDEV(rx_errors), 212 STAT_NETDEV(tx_errors), 213 STAT_NETDEV(rx_dropped), 214 STAT_NETDEV(tx_dropped), 215 STAT_NETDEV(multicast), 216 /* UniMAC RSV counters */ 217 STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), 218 STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), 219 STAT_MIB_RX("rx_128_255_oct", 
mib.rx.pkt_cnt.cnt_255), 220 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), 221 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), 222 STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), 223 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), 224 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), 225 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), 226 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), 227 STAT_MIB_RX("rx_pkts", mib.rx.pkt), 228 STAT_MIB_RX("rx_bytes", mib.rx.bytes), 229 STAT_MIB_RX("rx_multicast", mib.rx.mca), 230 STAT_MIB_RX("rx_broadcast", mib.rx.bca), 231 STAT_MIB_RX("rx_fcs", mib.rx.fcs), 232 STAT_MIB_RX("rx_control", mib.rx.cf), 233 STAT_MIB_RX("rx_pause", mib.rx.pf), 234 STAT_MIB_RX("rx_unknown", mib.rx.uo), 235 STAT_MIB_RX("rx_align", mib.rx.aln), 236 STAT_MIB_RX("rx_outrange", mib.rx.flr), 237 STAT_MIB_RX("rx_code", mib.rx.cde), 238 STAT_MIB_RX("rx_carrier", mib.rx.fcr), 239 STAT_MIB_RX("rx_oversize", mib.rx.ovr), 240 STAT_MIB_RX("rx_jabber", mib.rx.jbr), 241 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue), 242 STAT_MIB_RX("rx_good_pkts", mib.rx.pok), 243 STAT_MIB_RX("rx_unicast", mib.rx.uc), 244 STAT_MIB_RX("rx_ppp", mib.rx.ppp), 245 STAT_MIB_RX("rx_crc", mib.rx.rcrc), 246 /* UniMAC TSV counters */ 247 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), 248 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), 249 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), 250 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), 251 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), 252 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), 253 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 254 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 255 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 256 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 257 STAT_MIB_TX("tx_pkts", mib.tx.pkts), 258 STAT_MIB_TX("tx_multicast", mib.tx.mca), 259 STAT_MIB_TX("tx_broadcast", mib.tx.bca), 260 STAT_MIB_TX("tx_pause", mib.tx.pf), 261 STAT_MIB_TX("tx_control", mib.tx.cf), 262 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs), 263 STAT_MIB_TX("tx_oversize", mib.tx.ovr), 264 STAT_MIB_TX("tx_defer", mib.tx.drf), 265 STAT_MIB_TX("tx_excess_defer", mib.tx.edf), 266 STAT_MIB_TX("tx_single_col", mib.tx.scl), 267 STAT_MIB_TX("tx_multi_col", mib.tx.mcl), 268 STAT_MIB_TX("tx_late_col", mib.tx.lcl), 269 STAT_MIB_TX("tx_excess_col", mib.tx.ecl), 270 STAT_MIB_TX("tx_frags", mib.tx.frg), 271 STAT_MIB_TX("tx_total_col", mib.tx.ncl), 272 STAT_MIB_TX("tx_jabber", mib.tx.jbr), 273 STAT_MIB_TX("tx_bytes", mib.tx.bytes), 274 STAT_MIB_TX("tx_good_pkts", mib.tx.pok), 275 STAT_MIB_TX("tx_unicast", mib.tx.uc), 276 /* UniMAC RUNT counters */ 277 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt), 278 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), 279 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), 280 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 281 /* RXCHK misc statistics */ 282 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), 283 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, 284 RXCHK_OTHER_DISC_CNTR), 285 /* RBUF misc statistics */ 286 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 287 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 288 STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 289 STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), 290 STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), 291 
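	/* The STAT_*() helpers above come from bcmsysport.h (not shown here);
	 * each entry presumably records a display string plus the size and
	 * offset of its backing field, roughly along these lines:
	 *
	 *	#define STAT_MIB_RX(str, m) {				\
	 *		.stat_string = str,				\
	 *		.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
	 *		.stat_offset = offsetof(struct bcm_sysport_priv, m), \
	 *		.type	     = BCM_SYSPORT_STAT_MIB_RX,		\
	 *	}
	 *
	 * which is what lets bcm_sysport_get_stats() below walk the table
	 * using nothing more than (char *)priv + s->stat_offset.
	 */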
STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb), 292 STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed), 293 /* Per TX-queue statistics are dynamically appended */ 294 }; 295 296 #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 297 298 static void bcm_sysport_get_drvinfo(struct net_device *dev, 299 struct ethtool_drvinfo *info) 300 { 301 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 302 strlcpy(info->version, "0.1", sizeof(info->version)); 303 strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); 304 } 305 306 static u32 bcm_sysport_get_msglvl(struct net_device *dev) 307 { 308 struct bcm_sysport_priv *priv = netdev_priv(dev); 309 310 return priv->msg_enable; 311 } 312 313 static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) 314 { 315 struct bcm_sysport_priv *priv = netdev_priv(dev); 316 317 priv->msg_enable = enable; 318 } 319 320 static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) 321 { 322 switch (type) { 323 case BCM_SYSPORT_STAT_NETDEV: 324 case BCM_SYSPORT_STAT_NETDEV64: 325 case BCM_SYSPORT_STAT_RXCHK: 326 case BCM_SYSPORT_STAT_RBUF: 327 case BCM_SYSPORT_STAT_SOFT: 328 return true; 329 default: 330 return false; 331 } 332 } 333 334 static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) 335 { 336 struct bcm_sysport_priv *priv = netdev_priv(dev); 337 const struct bcm_sysport_stats *s; 338 unsigned int i, j; 339 340 switch (string_set) { 341 case ETH_SS_STATS: 342 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 343 s = &bcm_sysport_gstrings_stats[i]; 344 if (priv->is_lite && 345 !bcm_sysport_lite_stat_valid(s->type)) 346 continue; 347 j++; 348 } 349 /* Include per-queue statistics */ 350 return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; 351 default: 352 return -EOPNOTSUPP; 353 } 354 } 355 356 static void bcm_sysport_get_strings(struct net_device *dev, 357 u32 stringset, u8 *data) 358 { 359 struct bcm_sysport_priv *priv = netdev_priv(dev); 360 const struct bcm_sysport_stats *s; 361 char buf[128]; 362 int i, j; 363 364 switch (stringset) { 365 case ETH_SS_STATS: 366 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 367 s = &bcm_sysport_gstrings_stats[i]; 368 if (priv->is_lite && 369 !bcm_sysport_lite_stat_valid(s->type)) 370 continue; 371 372 memcpy(data + j * ETH_GSTRING_LEN, s->stat_string, 373 ETH_GSTRING_LEN); 374 j++; 375 } 376 377 for (i = 0; i < dev->num_tx_queues; i++) { 378 snprintf(buf, sizeof(buf), "txq%d_packets", i); 379 memcpy(data + j * ETH_GSTRING_LEN, buf, 380 ETH_GSTRING_LEN); 381 j++; 382 383 snprintf(buf, sizeof(buf), "txq%d_bytes", i); 384 memcpy(data + j * ETH_GSTRING_LEN, buf, 385 ETH_GSTRING_LEN); 386 j++; 387 } 388 break; 389 default: 390 break; 391 } 392 } 393 394 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) 395 { 396 int i, j = 0; 397 398 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 399 const struct bcm_sysport_stats *s; 400 u8 offset = 0; 401 u32 val = 0; 402 char *p; 403 404 s = &bcm_sysport_gstrings_stats[i]; 405 switch (s->type) { 406 case BCM_SYSPORT_STAT_NETDEV: 407 case BCM_SYSPORT_STAT_NETDEV64: 408 case BCM_SYSPORT_STAT_SOFT: 409 continue; 410 case BCM_SYSPORT_STAT_MIB_RX: 411 case BCM_SYSPORT_STAT_MIB_TX: 412 case BCM_SYSPORT_STAT_RUNT: 413 if (priv->is_lite) 414 continue; 415 416 if (s->type != BCM_SYSPORT_STAT_MIB_RX) 417 offset = UMAC_MIB_STAT_OFFSET; 418 val = umac_readl(priv, UMAC_MIB_START + j + offset); 419 break; 420 case BCM_SYSPORT_STAT_RXCHK: 421 val = rxchk_readl(priv, 
s->reg_offset); 422 if (val == ~0) 423 rxchk_writel(priv, 0, s->reg_offset); 424 break; 425 case BCM_SYSPORT_STAT_RBUF: 426 val = rbuf_readl(priv, s->reg_offset); 427 if (val == ~0) 428 rbuf_writel(priv, 0, s->reg_offset); 429 break; 430 } 431 432 j += s->stat_sizeof; 433 p = (char *)priv + s->stat_offset; 434 *(u32 *)p = val; 435 } 436 437 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); 438 } 439 440 static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv, 441 u64 *tx_bytes, u64 *tx_packets) 442 { 443 struct bcm_sysport_tx_ring *ring; 444 u64 bytes = 0, packets = 0; 445 unsigned int start; 446 unsigned int q; 447 448 for (q = 0; q < priv->netdev->num_tx_queues; q++) { 449 ring = &priv->tx_rings[q]; 450 do { 451 start = u64_stats_fetch_begin_irq(&priv->syncp); 452 bytes = ring->bytes; 453 packets = ring->packets; 454 } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); 455 456 *tx_bytes += bytes; 457 *tx_packets += packets; 458 } 459 } 460 461 static void bcm_sysport_get_stats(struct net_device *dev, 462 struct ethtool_stats *stats, u64 *data) 463 { 464 struct bcm_sysport_priv *priv = netdev_priv(dev); 465 struct bcm_sysport_stats64 *stats64 = &priv->stats64; 466 struct u64_stats_sync *syncp = &priv->syncp; 467 struct bcm_sysport_tx_ring *ring; 468 u64 tx_bytes = 0, tx_packets = 0; 469 unsigned int start; 470 int i, j; 471 472 if (netif_running(dev)) { 473 bcm_sysport_update_mib_counters(priv); 474 bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets); 475 stats64->tx_bytes = tx_bytes; 476 stats64->tx_packets = tx_packets; 477 } 478 479 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 480 const struct bcm_sysport_stats *s; 481 char *p; 482 483 s = &bcm_sysport_gstrings_stats[i]; 484 if (s->type == BCM_SYSPORT_STAT_NETDEV) 485 p = (char *)&dev->stats; 486 else if (s->type == BCM_SYSPORT_STAT_NETDEV64) 487 p = (char *)stats64; 488 else 489 p = (char *)priv; 490 491 if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type)) 492 continue; 493 p += s->stat_offset; 494 495 if (s->stat_sizeof == sizeof(u64) && 496 s->type == BCM_SYSPORT_STAT_NETDEV64) { 497 do { 498 start = u64_stats_fetch_begin_irq(syncp); 499 data[i] = *(u64 *)p; 500 } while (u64_stats_fetch_retry_irq(syncp, start)); 501 } else 502 data[i] = *(u32 *)p; 503 j++; 504 } 505 506 /* For SYSTEMPORT Lite since we have holes in our statistics, j would 507 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it 508 * needs to point to how many total statistics we have minus the 509 * number of per TX queue statistics 510 */ 511 j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) - 512 dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; 513 514 for (i = 0; i < dev->num_tx_queues; i++) { 515 ring = &priv->tx_rings[i]; 516 data[j] = ring->packets; 517 j++; 518 data[j] = ring->bytes; 519 j++; 520 } 521 } 522 523 static void bcm_sysport_get_wol(struct net_device *dev, 524 struct ethtool_wolinfo *wol) 525 { 526 struct bcm_sysport_priv *priv = netdev_priv(dev); 527 528 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 529 wol->wolopts = priv->wolopts; 530 531 if (!(priv->wolopts & WAKE_MAGICSECURE)) 532 return; 533 534 memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); 535 } 536 537 static int bcm_sysport_set_wol(struct net_device *dev, 538 struct ethtool_wolinfo *wol) 539 { 540 struct bcm_sysport_priv *priv = netdev_priv(dev); 541 struct device *kdev = &priv->pdev->dev; 542 u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 543 544 if (!device_can_wakeup(kdev)) 545 return 
-ENOTSUPP; 546 547 if (wol->wolopts & ~supported) 548 return -EINVAL; 549 550 if (wol->wolopts & WAKE_MAGICSECURE) 551 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); 552 553 /* Flag the device and relevant IRQ as wakeup capable */ 554 if (wol->wolopts) { 555 device_set_wakeup_enable(kdev, 1); 556 if (priv->wol_irq_disabled) 557 enable_irq_wake(priv->wol_irq); 558 priv->wol_irq_disabled = 0; 559 } else { 560 device_set_wakeup_enable(kdev, 0); 561 /* Avoid unbalanced disable_irq_wake calls */ 562 if (!priv->wol_irq_disabled) 563 disable_irq_wake(priv->wol_irq); 564 priv->wol_irq_disabled = 1; 565 } 566 567 priv->wolopts = wol->wolopts; 568 569 return 0; 570 } 571 572 static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv, 573 u32 usecs, u32 pkts) 574 { 575 u32 reg; 576 577 reg = rdma_readl(priv, RDMA_MBDONE_INTR); 578 reg &= ~(RDMA_INTR_THRESH_MASK | 579 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT); 580 reg |= pkts; 581 reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT; 582 rdma_writel(priv, reg, RDMA_MBDONE_INTR); 583 } 584 585 static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring, 586 struct ethtool_coalesce *ec) 587 { 588 struct bcm_sysport_priv *priv = ring->priv; 589 u32 reg; 590 591 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index)); 592 reg &= ~(RING_INTR_THRESH_MASK | 593 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT); 594 reg |= ec->tx_max_coalesced_frames; 595 reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) << 596 RING_TIMEOUT_SHIFT; 597 tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index)); 598 } 599 600 static int bcm_sysport_get_coalesce(struct net_device *dev, 601 struct ethtool_coalesce *ec) 602 { 603 struct bcm_sysport_priv *priv = netdev_priv(dev); 604 u32 reg; 605 606 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0)); 607 608 ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000; 609 ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK; 610 611 reg = rdma_readl(priv, RDMA_MBDONE_INTR); 612 613 ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000; 614 ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK; 615 ec->use_adaptive_rx_coalesce = priv->dim.use_dim; 616 617 return 0; 618 } 619 620 static int bcm_sysport_set_coalesce(struct net_device *dev, 621 struct ethtool_coalesce *ec) 622 { 623 struct bcm_sysport_priv *priv = netdev_priv(dev); 624 struct net_dim_cq_moder moder; 625 u32 usecs, pkts; 626 unsigned int i; 627 628 /* Base system clock is 125Mhz, DMA timeout is this reference clock 629 * divided by 1024, which yield roughly 8.192 us, our maximum value has 630 * to fit in the RING_TIMEOUT_MASK (16 bits). 
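	 *
	 * A quick worked example of the usec <-> tick conversion used below,
	 * assuming that 8.192 us tick:
	 *
	 *	ec->tx_coalesce_usecs = 100
	 *	ticks = DIV_ROUND_UP(100 * 1000, 8192) = 13	(~106.5 us)
	 *
	 * and bcm_sysport_get_coalesce() maps it back as 13 * 8192 / 1000 = 106.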
631 */ 632 if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK || 633 ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 || 634 ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK || 635 ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1) 636 return -EINVAL; 637 638 if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) || 639 (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) || 640 ec->use_adaptive_tx_coalesce) 641 return -EINVAL; 642 643 for (i = 0; i < dev->num_tx_queues; i++) 644 bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec); 645 646 priv->rx_coalesce_usecs = ec->rx_coalesce_usecs; 647 priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 648 usecs = priv->rx_coalesce_usecs; 649 pkts = priv->rx_max_coalesced_frames; 650 651 if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) { 652 moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode); 653 usecs = moder.usec; 654 pkts = moder.pkts; 655 } 656 657 priv->dim.use_dim = ec->use_adaptive_rx_coalesce; 658 659 /* Apply desired coalescing parameters */ 660 bcm_sysport_set_rx_coalesce(priv, usecs, pkts); 661 662 return 0; 663 } 664 665 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) 666 { 667 dev_consume_skb_any(cb->skb); 668 cb->skb = NULL; 669 dma_unmap_addr_set(cb, dma_addr, 0); 670 } 671 672 static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, 673 struct bcm_sysport_cb *cb) 674 { 675 struct device *kdev = &priv->pdev->dev; 676 struct net_device *ndev = priv->netdev; 677 struct sk_buff *skb, *rx_skb; 678 dma_addr_t mapping; 679 680 /* Allocate a new SKB for a new packet */ 681 skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); 682 if (!skb) { 683 priv->mib.alloc_rx_buff_failed++; 684 netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); 685 return NULL; 686 } 687 688 mapping = dma_map_single(kdev, skb->data, 689 RX_BUF_LENGTH, DMA_FROM_DEVICE); 690 if (dma_mapping_error(kdev, mapping)) { 691 priv->mib.rx_dma_failed++; 692 dev_kfree_skb_any(skb); 693 netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); 694 return NULL; 695 } 696 697 /* Grab the current SKB on the ring */ 698 rx_skb = cb->skb; 699 if (likely(rx_skb)) 700 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 701 RX_BUF_LENGTH, DMA_FROM_DEVICE); 702 703 /* Put the new SKB on the ring */ 704 cb->skb = skb; 705 dma_unmap_addr_set(cb, dma_addr, mapping); 706 dma_desc_set_addr(priv, cb->bd_addr, mapping); 707 708 netif_dbg(priv, rx_status, ndev, "RX refill\n"); 709 710 /* Return the current SKB to the caller */ 711 return rx_skb; 712 } 713 714 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) 715 { 716 struct bcm_sysport_cb *cb; 717 struct sk_buff *skb; 718 unsigned int i; 719 720 for (i = 0; i < priv->num_rx_bds; i++) { 721 cb = &priv->rx_cbs[i]; 722 skb = bcm_sysport_rx_refill(priv, cb); 723 if (skb) 724 dev_kfree_skb(skb); 725 if (!cb->skb) 726 return -ENOMEM; 727 } 728 729 return 0; 730 } 731 732 /* Poll the hardware for up to budget packets to process */ 733 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, 734 unsigned int budget) 735 { 736 struct bcm_sysport_stats64 *stats64 = &priv->stats64; 737 struct net_device *ndev = priv->netdev; 738 unsigned int processed = 0, to_process; 739 unsigned int processed_bytes = 0; 740 struct bcm_sysport_cb *cb; 741 struct sk_buff *skb; 742 unsigned int p_index; 743 u16 len, status; 744 struct bcm_rsb *rsb; 745 746 /* Clear status before servicing to reduce spurious interrupts */ 747 intrl2_0_writel(priv, 
INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
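		 *
		 * Roughly, the received buffer is laid out as below, with the
		 * trailing FCS only present when UniMAC forwards the CRC
		 * (priv->crc_fwd):
		 *
		 *	| struct bcm_rsb | 2 byte pad | Ethernet frame | FCS |
		 *	^-- skb_pull(skb, sizeof(*rsb) + 2)    skb_trim() --^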
828 */ 829 skb_pull(skb, sizeof(*rsb) + 2); 830 len -= (sizeof(*rsb) + 2); 831 processed_bytes += len; 832 833 /* UniMAC may forward CRC */ 834 if (priv->crc_fwd) { 835 skb_trim(skb, len - ETH_FCS_LEN); 836 len -= ETH_FCS_LEN; 837 } 838 839 skb->protocol = eth_type_trans(skb, ndev); 840 ndev->stats.rx_packets++; 841 ndev->stats.rx_bytes += len; 842 u64_stats_update_begin(&priv->syncp); 843 stats64->rx_packets++; 844 stats64->rx_bytes += len; 845 u64_stats_update_end(&priv->syncp); 846 847 napi_gro_receive(&priv->napi, skb); 848 next: 849 processed++; 850 priv->rx_read_ptr++; 851 852 if (priv->rx_read_ptr == priv->num_rx_bds) 853 priv->rx_read_ptr = 0; 854 } 855 856 priv->dim.packets = processed; 857 priv->dim.bytes = processed_bytes; 858 859 return processed; 860 } 861 862 static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, 863 struct bcm_sysport_cb *cb, 864 unsigned int *bytes_compl, 865 unsigned int *pkts_compl) 866 { 867 struct bcm_sysport_priv *priv = ring->priv; 868 struct device *kdev = &priv->pdev->dev; 869 870 if (cb->skb) { 871 *bytes_compl += cb->skb->len; 872 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 873 dma_unmap_len(cb, dma_len), 874 DMA_TO_DEVICE); 875 (*pkts_compl)++; 876 bcm_sysport_free_cb(cb); 877 /* SKB fragment */ 878 } else if (dma_unmap_addr(cb, dma_addr)) { 879 *bytes_compl += dma_unmap_len(cb, dma_len); 880 dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), 881 dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); 882 dma_unmap_addr_set(cb, dma_addr, 0); 883 } 884 } 885 886 /* Reclaim queued SKBs for transmission completion, lockless version */ 887 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 888 struct bcm_sysport_tx_ring *ring) 889 { 890 unsigned int pkts_compl = 0, bytes_compl = 0; 891 struct net_device *ndev = priv->netdev; 892 unsigned int txbds_processed = 0; 893 struct bcm_sysport_cb *cb; 894 unsigned int txbds_ready; 895 unsigned int c_index; 896 u32 hw_ind; 897 898 /* Clear status before servicing to reduce spurious interrupts */ 899 if (!ring->priv->is_lite) 900 intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR); 901 else 902 intrl2_0_writel(ring->priv, BIT(ring->index + 903 INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR); 904 905 /* Compute how many descriptors have been processed since last call */ 906 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 907 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 908 txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; 909 910 netif_dbg(priv, tx_done, ndev, 911 "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", 912 ring->index, ring->c_index, c_index, txbds_ready); 913 914 while (txbds_processed < txbds_ready) { 915 cb = &ring->cbs[ring->clean_index]; 916 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); 917 918 ring->desc_count++; 919 txbds_processed++; 920 921 if (likely(ring->clean_index < ring->size - 1)) 922 ring->clean_index++; 923 else 924 ring->clean_index = 0; 925 } 926 927 u64_stats_update_begin(&priv->syncp); 928 ring->packets += pkts_compl; 929 ring->bytes += bytes_compl; 930 u64_stats_update_end(&priv->syncp); 931 932 ring->c_index = c_index; 933 934 netif_dbg(priv, tx_done, ndev, 935 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", 936 ring->index, ring->c_index, pkts_compl, bytes_compl); 937 938 return pkts_compl; 939 } 940 941 /* Locked version of the per-ring TX reclaim routine */ 942 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 
					    struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct net_dim_sample dim_sample;
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
			       priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN); 1072 rxchk_writel(priv, reg, RXCHK_CONTROL); 1073 1074 /* Make sure we restore correct CID index in case HW lost 1075 * its context during deep idle state 1076 */ 1077 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { 1078 rxchk_writel(priv, priv->filters_loc[index] << 1079 RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index)); 1080 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); 1081 } 1082 1083 /* Clear the MagicPacket detection logic */ 1084 mpd_enable_set(priv, false); 1085 1086 reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS); 1087 if (reg & INTRL2_0_MPD) 1088 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n"); 1089 1090 if (reg & INTRL2_0_BRCM_MATCH_TAG) { 1091 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) & 1092 RXCHK_BRCM_TAG_MATCH_MASK; 1093 netdev_info(priv->netdev, 1094 "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg); 1095 } 1096 1097 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); 1098 } 1099 1100 static void bcm_sysport_dim_work(struct work_struct *work) 1101 { 1102 struct net_dim *dim = container_of(work, struct net_dim, work); 1103 struct bcm_sysport_net_dim *ndim = 1104 container_of(dim, struct bcm_sysport_net_dim, dim); 1105 struct bcm_sysport_priv *priv = 1106 container_of(ndim, struct bcm_sysport_priv, dim); 1107 struct net_dim_cq_moder cur_profile = 1108 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 1109 1110 bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts); 1111 dim->state = NET_DIM_START_MEASURE; 1112 } 1113 1114 /* RX and misc interrupt routine */ 1115 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) 1116 { 1117 struct net_device *dev = dev_id; 1118 struct bcm_sysport_priv *priv = netdev_priv(dev); 1119 struct bcm_sysport_tx_ring *txr; 1120 unsigned int ring, ring_bit; 1121 1122 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & 1123 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 1124 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); 1125 1126 if (unlikely(priv->irq0_stat == 0)) { 1127 netdev_warn(priv->netdev, "spurious RX interrupt\n"); 1128 return IRQ_NONE; 1129 } 1130 1131 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) { 1132 priv->dim.event_ctr++; 1133 if (likely(napi_schedule_prep(&priv->napi))) { 1134 /* disable RX interrupts */ 1135 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); 1136 __napi_schedule_irqoff(&priv->napi); 1137 } 1138 } 1139 1140 /* TX ring is full, perform a full reclaim since we do not know 1141 * which one would trigger this interrupt 1142 */ 1143 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) 1144 bcm_sysport_tx_reclaim_all(priv); 1145 1146 if (!priv->is_lite) 1147 goto out; 1148 1149 for (ring = 0; ring < dev->num_tx_queues; ring++) { 1150 ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT); 1151 if (!(priv->irq0_stat & ring_bit)) 1152 continue; 1153 1154 txr = &priv->tx_rings[ring]; 1155 1156 if (likely(napi_schedule_prep(&txr->napi))) { 1157 intrl2_0_mask_set(priv, ring_bit); 1158 __napi_schedule(&txr->napi); 1159 } 1160 } 1161 out: 1162 return IRQ_HANDLED; 1163 } 1164 1165 /* TX interrupt service routine */ 1166 static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id) 1167 { 1168 struct net_device *dev = dev_id; 1169 struct bcm_sysport_priv *priv = netdev_priv(dev); 1170 struct bcm_sysport_tx_ring *txr; 1171 unsigned int ring; 1172 1173 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & 1174 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); 1175 intrl2_1_writel(priv, 
0xffffffff, INTRL2_CPU_CLEAR); 1176 1177 if (unlikely(priv->irq1_stat == 0)) { 1178 netdev_warn(priv->netdev, "spurious TX interrupt\n"); 1179 return IRQ_NONE; 1180 } 1181 1182 for (ring = 0; ring < dev->num_tx_queues; ring++) { 1183 if (!(priv->irq1_stat & BIT(ring))) 1184 continue; 1185 1186 txr = &priv->tx_rings[ring]; 1187 1188 if (likely(napi_schedule_prep(&txr->napi))) { 1189 intrl2_1_mask_set(priv, BIT(ring)); 1190 __napi_schedule_irqoff(&txr->napi); 1191 } 1192 } 1193 1194 return IRQ_HANDLED; 1195 } 1196 1197 static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) 1198 { 1199 struct bcm_sysport_priv *priv = dev_id; 1200 1201 pm_wakeup_event(&priv->pdev->dev, 0); 1202 1203 return IRQ_HANDLED; 1204 } 1205 1206 #ifdef CONFIG_NET_POLL_CONTROLLER 1207 static void bcm_sysport_poll_controller(struct net_device *dev) 1208 { 1209 struct bcm_sysport_priv *priv = netdev_priv(dev); 1210 1211 disable_irq(priv->irq0); 1212 bcm_sysport_rx_isr(priv->irq0, priv); 1213 enable_irq(priv->irq0); 1214 1215 if (!priv->is_lite) { 1216 disable_irq(priv->irq1); 1217 bcm_sysport_tx_isr(priv->irq1, priv); 1218 enable_irq(priv->irq1); 1219 } 1220 } 1221 #endif 1222 1223 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, 1224 struct net_device *dev) 1225 { 1226 struct bcm_sysport_priv *priv = netdev_priv(dev); 1227 struct sk_buff *nskb; 1228 struct bcm_tsb *tsb; 1229 u32 csum_info; 1230 u8 ip_proto; 1231 u16 csum_start; 1232 __be16 ip_ver; 1233 1234 /* Re-allocate SKB if needed */ 1235 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { 1236 nskb = skb_realloc_headroom(skb, sizeof(*tsb)); 1237 if (!nskb) { 1238 dev_kfree_skb_any(skb); 1239 priv->mib.tx_realloc_tsb_failed++; 1240 dev->stats.tx_errors++; 1241 dev->stats.tx_dropped++; 1242 return NULL; 1243 } 1244 dev_consume_skb_any(skb); 1245 skb = nskb; 1246 priv->mib.tx_realloc_tsb++; 1247 } 1248 1249 tsb = skb_push(skb, sizeof(*tsb)); 1250 /* Zero-out TSB by default */ 1251 memset(tsb, 0, sizeof(*tsb)); 1252 1253 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1254 ip_ver = skb->protocol; 1255 switch (ip_ver) { 1256 case htons(ETH_P_IP): 1257 ip_proto = ip_hdr(skb)->protocol; 1258 break; 1259 case htons(ETH_P_IPV6): 1260 ip_proto = ipv6_hdr(skb)->nexthdr; 1261 break; 1262 default: 1263 return skb; 1264 } 1265 1266 /* Get the checksum offset and the L4 (transport) offset */ 1267 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb); 1268 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK; 1269 csum_info |= (csum_start << L4_PTR_SHIFT); 1270 1271 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { 1272 csum_info |= L4_LENGTH_VALID; 1273 if (ip_proto == IPPROTO_UDP && 1274 ip_ver == htons(ETH_P_IP)) 1275 csum_info |= L4_UDP; 1276 } else { 1277 csum_info = 0; 1278 } 1279 1280 tsb->l4_ptr_dest_map = csum_info; 1281 } 1282 1283 return skb; 1284 } 1285 1286 static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, 1287 struct net_device *dev) 1288 { 1289 struct bcm_sysport_priv *priv = netdev_priv(dev); 1290 struct device *kdev = &priv->pdev->dev; 1291 struct bcm_sysport_tx_ring *ring; 1292 struct bcm_sysport_cb *cb; 1293 struct netdev_queue *txq; 1294 struct dma_desc *desc; 1295 unsigned int skb_len; 1296 unsigned long flags; 1297 dma_addr_t mapping; 1298 u32 len_status; 1299 u16 queue; 1300 int ret; 1301 1302 queue = skb_get_queue_mapping(skb); 1303 txq = netdev_get_tx_queue(dev, queue); 1304 ring = &priv->tx_rings[queue]; 1305 1306 /* lock against tx reclaim in BH context and TX ring full interrupt */ 1307 
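	/* For reference, the csum_info word built by bcm_sysport_insert_tsb()
	 * above, worked through for an untagged TCP/IPv4 frame with a 20 byte
	 * IP header (the exact field placement comes from L4_PTR_SHIFT and
	 * L4_CSUM_PTR_MASK in bcmsysport.h):
	 *
	 *	csum_start = 14 (Ethernet) + 20 (IP)       = 34
	 *	csum_info  = (34 + 16) & L4_CSUM_PTR_MASK    (TCP check at +16)
	 *	csum_info |= 34 << L4_PTR_SHIFT
	 *	csum_info |= L4_LENGTH_VALID                 (TCP/UDP only)
	 */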
spin_lock_irqsave(&ring->lock, flags); 1308 if (unlikely(ring->desc_count == 0)) { 1309 netif_tx_stop_queue(txq); 1310 netdev_err(dev, "queue %d awake and ring full!\n", queue); 1311 ret = NETDEV_TX_BUSY; 1312 goto out; 1313 } 1314 1315 /* Insert TSB and checksum infos */ 1316 if (priv->tsb_en) { 1317 skb = bcm_sysport_insert_tsb(skb, dev); 1318 if (!skb) { 1319 ret = NETDEV_TX_OK; 1320 goto out; 1321 } 1322 } 1323 1324 skb_len = skb->len; 1325 1326 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 1327 if (dma_mapping_error(kdev, mapping)) { 1328 priv->mib.tx_dma_failed++; 1329 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", 1330 skb->data, skb_len); 1331 ret = NETDEV_TX_OK; 1332 goto out; 1333 } 1334 1335 /* Remember the SKB for future freeing */ 1336 cb = &ring->cbs[ring->curr_desc]; 1337 cb->skb = skb; 1338 dma_unmap_addr_set(cb, dma_addr, mapping); 1339 dma_unmap_len_set(cb, dma_len, skb_len); 1340 1341 /* Fetch a descriptor entry from our pool */ 1342 desc = ring->desc_cpu; 1343 1344 desc->addr_lo = lower_32_bits(mapping); 1345 len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; 1346 len_status |= (skb_len << DESC_LEN_SHIFT); 1347 len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << 1348 DESC_STATUS_SHIFT; 1349 if (skb->ip_summed == CHECKSUM_PARTIAL) 1350 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); 1351 1352 ring->curr_desc++; 1353 if (ring->curr_desc == ring->size) 1354 ring->curr_desc = 0; 1355 ring->desc_count--; 1356 1357 /* Ensure write completion of the descriptor status/length 1358 * in DRAM before the System Port WRITE_PORT register latches 1359 * the value 1360 */ 1361 wmb(); 1362 desc->addr_status_len = len_status; 1363 wmb(); 1364 1365 /* Write this descriptor address to the RING write port */ 1366 tdma_port_write_desc_addr(priv, desc, ring->index); 1367 1368 /* Check ring space and update SW control flow */ 1369 if (ring->desc_count == 0) 1370 netif_tx_stop_queue(txq); 1371 1372 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n", 1373 ring->index, ring->desc_count, ring->curr_desc); 1374 1375 ret = NETDEV_TX_OK; 1376 out: 1377 spin_unlock_irqrestore(&ring->lock, flags); 1378 return ret; 1379 } 1380 1381 static void bcm_sysport_tx_timeout(struct net_device *dev) 1382 { 1383 netdev_warn(dev, "transmit timeout!\n"); 1384 1385 netif_trans_update(dev); 1386 dev->stats.tx_errors++; 1387 1388 netif_tx_wake_all_queues(dev); 1389 } 1390 1391 /* phylib adjust link callback */ 1392 static void bcm_sysport_adj_link(struct net_device *dev) 1393 { 1394 struct bcm_sysport_priv *priv = netdev_priv(dev); 1395 struct phy_device *phydev = dev->phydev; 1396 unsigned int changed = 0; 1397 u32 cmd_bits = 0, reg; 1398 1399 if (priv->old_link != phydev->link) { 1400 changed = 1; 1401 priv->old_link = phydev->link; 1402 } 1403 1404 if (priv->old_duplex != phydev->duplex) { 1405 changed = 1; 1406 priv->old_duplex = phydev->duplex; 1407 } 1408 1409 if (priv->is_lite) 1410 goto out; 1411 1412 switch (phydev->speed) { 1413 case SPEED_2500: 1414 cmd_bits = CMD_SPEED_2500; 1415 break; 1416 case SPEED_1000: 1417 cmd_bits = CMD_SPEED_1000; 1418 break; 1419 case SPEED_100: 1420 cmd_bits = CMD_SPEED_100; 1421 break; 1422 case SPEED_10: 1423 cmd_bits = CMD_SPEED_10; 1424 break; 1425 default: 1426 break; 1427 } 1428 cmd_bits <<= CMD_SPEED_SHIFT; 1429 1430 if (phydev->duplex == DUPLEX_HALF) 1431 cmd_bits |= CMD_HD_EN; 1432 1433 if (priv->old_pause != phydev->pause) { 1434 changed = 1; 1435 priv->old_pause = phydev->pause; 1436 } 1437 1438 
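	/* Example: a 1000/full link partner that advertises pause ends up with
	 * cmd_bits == (CMD_SPEED_1000 << CMD_SPEED_SHIFT) and none of
	 * CMD_HD_EN or the pause-ignore bits set, i.e. flow control is left
	 * enabled in UMAC_CMD below.
	 */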
if (!phydev->pause) 1439 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; 1440 1441 if (!changed) 1442 return; 1443 1444 if (phydev->link) { 1445 reg = umac_readl(priv, UMAC_CMD); 1446 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | 1447 CMD_HD_EN | CMD_RX_PAUSE_IGNORE | 1448 CMD_TX_PAUSE_IGNORE); 1449 reg |= cmd_bits; 1450 umac_writel(priv, reg, UMAC_CMD); 1451 } 1452 out: 1453 if (changed) 1454 phy_print_status(phydev); 1455 } 1456 1457 static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv, 1458 void (*cb)(struct work_struct *work)) 1459 { 1460 struct bcm_sysport_net_dim *dim = &priv->dim; 1461 1462 INIT_WORK(&dim->dim.work, cb); 1463 dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; 1464 dim->event_ctr = 0; 1465 dim->packets = 0; 1466 dim->bytes = 0; 1467 } 1468 1469 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv) 1470 { 1471 struct bcm_sysport_net_dim *dim = &priv->dim; 1472 struct net_dim_cq_moder moder; 1473 u32 usecs, pkts; 1474 1475 usecs = priv->rx_coalesce_usecs; 1476 pkts = priv->rx_max_coalesced_frames; 1477 1478 /* If DIM was enabled, re-apply default parameters */ 1479 if (dim->use_dim) { 1480 moder = net_dim_get_def_rx_moderation(dim->dim.mode); 1481 usecs = moder.usec; 1482 pkts = moder.pkts; 1483 } 1484 1485 bcm_sysport_set_rx_coalesce(priv, usecs, pkts); 1486 } 1487 1488 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, 1489 unsigned int index) 1490 { 1491 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; 1492 struct device *kdev = &priv->pdev->dev; 1493 size_t size; 1494 void *p; 1495 u32 reg; 1496 1497 /* Simple descriptors partitioning for now */ 1498 size = 256; 1499 1500 /* We just need one DMA descriptor which is DMA-able, since writing to 1501 * the port will allocate a new descriptor in its internal linked-list 1502 */ 1503 p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1504 GFP_KERNEL); 1505 if (!p) { 1506 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); 1507 return -ENOMEM; 1508 } 1509 1510 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); 1511 if (!ring->cbs) { 1512 dma_free_coherent(kdev, sizeof(struct dma_desc), 1513 ring->desc_cpu, ring->desc_dma); 1514 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); 1515 return -ENOMEM; 1516 } 1517 1518 /* Initialize SW view of the ring */ 1519 spin_lock_init(&ring->lock); 1520 ring->priv = priv; 1521 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); 1522 ring->index = index; 1523 ring->size = size; 1524 ring->clean_index = 0; 1525 ring->alloc_size = ring->size; 1526 ring->desc_cpu = p; 1527 ring->desc_count = ring->size; 1528 ring->curr_desc = 0; 1529 1530 /* Initialize HW ring */ 1531 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index)); 1532 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index)); 1533 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index)); 1534 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index)); 1535 1536 /* Configure QID and port mapping */ 1537 reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index)); 1538 reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT); 1539 if (ring->inspect) { 1540 reg |= ring->switch_queue & RING_QID_MASK; 1541 reg |= ring->switch_port << RING_PORT_ID_SHIFT; 1542 } else { 1543 reg |= RING_IGNORE_STATUS; 1544 } 1545 tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index)); 1546 tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); 1547 1548 /* Enable ACB algorithm 2 */ 1549 reg = tdma_readl(priv, 
TDMA_CONTROL); 1550 reg |= tdma_control_bit(priv, ACB_ALGO); 1551 tdma_writel(priv, reg, TDMA_CONTROL); 1552 1553 /* Do not use tdma_control_bit() here because TSB_SWAP1 collides 1554 * with the original definition of ACB_ALGO 1555 */ 1556 reg = tdma_readl(priv, TDMA_CONTROL); 1557 if (priv->is_lite) 1558 reg &= ~BIT(TSB_SWAP1); 1559 /* Set a correct TSB format based on host endian */ 1560 if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 1561 reg |= tdma_control_bit(priv, TSB_SWAP0); 1562 else 1563 reg &= ~tdma_control_bit(priv, TSB_SWAP0); 1564 tdma_writel(priv, reg, TDMA_CONTROL); 1565 1566 /* Program the number of descriptors as MAX_THRESHOLD and half of 1567 * its size for the hysteresis trigger 1568 */ 1569 tdma_writel(priv, ring->size | 1570 1 << RING_HYST_THRESH_SHIFT, 1571 TDMA_DESC_RING_MAX_HYST(index)); 1572 1573 /* Enable the ring queue in the arbiter */ 1574 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN); 1575 reg |= (1 << index); 1576 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN); 1577 1578 napi_enable(&ring->napi); 1579 1580 netif_dbg(priv, hw, priv->netdev, 1581 "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n", 1582 ring->size, ring->desc_cpu, ring->switch_queue, 1583 ring->switch_port); 1584 1585 return 0; 1586 } 1587 1588 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, 1589 unsigned int index) 1590 { 1591 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; 1592 struct device *kdev = &priv->pdev->dev; 1593 u32 reg; 1594 1595 /* Caller should stop the TDMA engine */ 1596 reg = tdma_readl(priv, TDMA_STATUS); 1597 if (!(reg & TDMA_DISABLED)) 1598 netdev_warn(priv->netdev, "TDMA not stopped!\n"); 1599 1600 /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could 1601 * fail, so by checking this pointer we know whether the TX ring was 1602 * fully initialized or not. 
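	 *
	 * The init side (above) allocates in this order:
	 *
	 *	dma_alloc_coherent()	-> ring->desc_cpu / ring->desc_dma
	 *	kcalloc()		-> ring->cbs
	 *
	 * and frees the coherent descriptor itself if the kcalloc() fails, so
	 * a non-NULL ring->cbs implies the descriptor is valid as well.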
1603 */ 1604 if (!ring->cbs) 1605 return; 1606 1607 napi_disable(&ring->napi); 1608 netif_napi_del(&ring->napi); 1609 1610 bcm_sysport_tx_clean(priv, ring); 1611 1612 kfree(ring->cbs); 1613 ring->cbs = NULL; 1614 1615 if (ring->desc_dma) { 1616 dma_free_coherent(kdev, sizeof(struct dma_desc), 1617 ring->desc_cpu, ring->desc_dma); 1618 ring->desc_dma = 0; 1619 } 1620 ring->size = 0; 1621 ring->alloc_size = 0; 1622 1623 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n"); 1624 } 1625 1626 /* RDMA helper */ 1627 static inline int rdma_enable_set(struct bcm_sysport_priv *priv, 1628 unsigned int enable) 1629 { 1630 unsigned int timeout = 1000; 1631 u32 reg; 1632 1633 reg = rdma_readl(priv, RDMA_CONTROL); 1634 if (enable) 1635 reg |= RDMA_EN; 1636 else 1637 reg &= ~RDMA_EN; 1638 rdma_writel(priv, reg, RDMA_CONTROL); 1639 1640 /* Poll for RMDA disabling completion */ 1641 do { 1642 reg = rdma_readl(priv, RDMA_STATUS); 1643 if (!!(reg & RDMA_DISABLED) == !enable) 1644 return 0; 1645 usleep_range(1000, 2000); 1646 } while (timeout-- > 0); 1647 1648 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n"); 1649 1650 return -ETIMEDOUT; 1651 } 1652 1653 /* TDMA helper */ 1654 static inline int tdma_enable_set(struct bcm_sysport_priv *priv, 1655 unsigned int enable) 1656 { 1657 unsigned int timeout = 1000; 1658 u32 reg; 1659 1660 reg = tdma_readl(priv, TDMA_CONTROL); 1661 if (enable) 1662 reg |= tdma_control_bit(priv, TDMA_EN); 1663 else 1664 reg &= ~tdma_control_bit(priv, TDMA_EN); 1665 tdma_writel(priv, reg, TDMA_CONTROL); 1666 1667 /* Poll for TMDA disabling completion */ 1668 do { 1669 reg = tdma_readl(priv, TDMA_STATUS); 1670 if (!!(reg & TDMA_DISABLED) == !enable) 1671 return 0; 1672 1673 usleep_range(1000, 2000); 1674 } while (timeout-- > 0); 1675 1676 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n"); 1677 1678 return -ETIMEDOUT; 1679 } 1680 1681 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) 1682 { 1683 struct bcm_sysport_cb *cb; 1684 u32 reg; 1685 int ret; 1686 int i; 1687 1688 /* Initialize SW view of the RX ring */ 1689 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC; 1690 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; 1691 priv->rx_c_index = 0; 1692 priv->rx_read_ptr = 0; 1693 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), 1694 GFP_KERNEL); 1695 if (!priv->rx_cbs) { 1696 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); 1697 return -ENOMEM; 1698 } 1699 1700 for (i = 0; i < priv->num_rx_bds; i++) { 1701 cb = priv->rx_cbs + i; 1702 cb->bd_addr = priv->rx_bds + i * DESC_SIZE; 1703 } 1704 1705 ret = bcm_sysport_alloc_rx_bufs(priv); 1706 if (ret) { 1707 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); 1708 return ret; 1709 } 1710 1711 /* Initialize HW, ensure RDMA is disabled */ 1712 reg = rdma_readl(priv, RDMA_STATUS); 1713 if (!(reg & RDMA_DISABLED)) 1714 rdma_enable_set(priv, 0); 1715 1716 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO); 1717 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI); 1718 rdma_writel(priv, 0, RDMA_PROD_INDEX); 1719 rdma_writel(priv, 0, RDMA_CONS_INDEX); 1720 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT | 1721 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE); 1722 /* Operate the queue in ring mode */ 1723 rdma_writel(priv, 0, RDMA_START_ADDR_HI); 1724 rdma_writel(priv, 0, RDMA_START_ADDR_LO); 1725 rdma_writel(priv, 0, RDMA_END_ADDR_HI); 1726 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); 1727 1728 netif_dbg(priv, hw, priv->netdev, 1729 "RDMA cfg, 
num_rx_bds=%d, rx_bds=%p\n", 1730 priv->num_rx_bds, priv->rx_bds); 1731 1732 return 0; 1733 } 1734 1735 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv) 1736 { 1737 struct bcm_sysport_cb *cb; 1738 unsigned int i; 1739 u32 reg; 1740 1741 /* Caller should ensure RDMA is disabled */ 1742 reg = rdma_readl(priv, RDMA_STATUS); 1743 if (!(reg & RDMA_DISABLED)) 1744 netdev_warn(priv->netdev, "RDMA not stopped!\n"); 1745 1746 for (i = 0; i < priv->num_rx_bds; i++) { 1747 cb = &priv->rx_cbs[i]; 1748 if (dma_unmap_addr(cb, dma_addr)) 1749 dma_unmap_single(&priv->pdev->dev, 1750 dma_unmap_addr(cb, dma_addr), 1751 RX_BUF_LENGTH, DMA_FROM_DEVICE); 1752 bcm_sysport_free_cb(cb); 1753 } 1754 1755 kfree(priv->rx_cbs); 1756 priv->rx_cbs = NULL; 1757 1758 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n"); 1759 } 1760 1761 static void bcm_sysport_set_rx_mode(struct net_device *dev) 1762 { 1763 struct bcm_sysport_priv *priv = netdev_priv(dev); 1764 u32 reg; 1765 1766 if (priv->is_lite) 1767 return; 1768 1769 reg = umac_readl(priv, UMAC_CMD); 1770 if (dev->flags & IFF_PROMISC) 1771 reg |= CMD_PROMISC; 1772 else 1773 reg &= ~CMD_PROMISC; 1774 umac_writel(priv, reg, UMAC_CMD); 1775 1776 /* No support for ALLMULTI */ 1777 if (dev->flags & IFF_ALLMULTI) 1778 return; 1779 } 1780 1781 static inline void umac_enable_set(struct bcm_sysport_priv *priv, 1782 u32 mask, unsigned int enable) 1783 { 1784 u32 reg; 1785 1786 if (!priv->is_lite) { 1787 reg = umac_readl(priv, UMAC_CMD); 1788 if (enable) 1789 reg |= mask; 1790 else 1791 reg &= ~mask; 1792 umac_writel(priv, reg, UMAC_CMD); 1793 } else { 1794 reg = gib_readl(priv, GIB_CONTROL); 1795 if (enable) 1796 reg |= mask; 1797 else 1798 reg &= ~mask; 1799 gib_writel(priv, reg, GIB_CONTROL); 1800 } 1801 1802 /* UniMAC stops on a packet boundary, wait for a full-sized packet 1803 * to be processed (1 msec). 
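	 *
	 * For scale: a 1518 byte frame is ~12 us on the wire at 1Gb/s and
	 * ~120 us at 100Mb/s, so the 1-2 ms sleep below is a generous bound.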
1804 */ 1805 if (enable == 0) 1806 usleep_range(1000, 2000); 1807 } 1808 1809 static inline void umac_reset(struct bcm_sysport_priv *priv) 1810 { 1811 u32 reg; 1812 1813 if (priv->is_lite) 1814 return; 1815 1816 reg = umac_readl(priv, UMAC_CMD); 1817 reg |= CMD_SW_RESET; 1818 umac_writel(priv, reg, UMAC_CMD); 1819 udelay(10); 1820 reg = umac_readl(priv, UMAC_CMD); 1821 reg &= ~CMD_SW_RESET; 1822 umac_writel(priv, reg, UMAC_CMD); 1823 } 1824 1825 static void umac_set_hw_addr(struct bcm_sysport_priv *priv, 1826 unsigned char *addr) 1827 { 1828 u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | 1829 addr[3]; 1830 u32 mac1 = (addr[4] << 8) | addr[5]; 1831 1832 if (!priv->is_lite) { 1833 umac_writel(priv, mac0, UMAC_MAC0); 1834 umac_writel(priv, mac1, UMAC_MAC1); 1835 } else { 1836 gib_writel(priv, mac0, GIB_MAC0); 1837 gib_writel(priv, mac1, GIB_MAC1); 1838 } 1839 } 1840 1841 static void topctrl_flush(struct bcm_sysport_priv *priv) 1842 { 1843 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); 1844 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); 1845 mdelay(1); 1846 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 1847 topctrl_writel(priv, 0, TX_FLUSH_CNTL); 1848 } 1849 1850 static int bcm_sysport_change_mac(struct net_device *dev, void *p) 1851 { 1852 struct bcm_sysport_priv *priv = netdev_priv(dev); 1853 struct sockaddr *addr = p; 1854 1855 if (!is_valid_ether_addr(addr->sa_data)) 1856 return -EINVAL; 1857 1858 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1859 1860 /* interface is disabled, changes to MAC will be reflected on next 1861 * open call 1862 */ 1863 if (!netif_running(dev)) 1864 return 0; 1865 1866 umac_set_hw_addr(priv, dev->dev_addr); 1867 1868 return 0; 1869 } 1870 1871 static void bcm_sysport_get_stats64(struct net_device *dev, 1872 struct rtnl_link_stats64 *stats) 1873 { 1874 struct bcm_sysport_priv *priv = netdev_priv(dev); 1875 struct bcm_sysport_stats64 *stats64 = &priv->stats64; 1876 unsigned int start; 1877 1878 netdev_stats_to_stats64(stats, &dev->stats); 1879 1880 bcm_sysport_update_tx_stats(priv, &stats->tx_bytes, 1881 &stats->tx_packets); 1882 1883 do { 1884 start = u64_stats_fetch_begin_irq(&priv->syncp); 1885 stats->rx_packets = stats64->rx_packets; 1886 stats->rx_bytes = stats64->rx_bytes; 1887 } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); 1888 } 1889 1890 static void bcm_sysport_netif_start(struct net_device *dev) 1891 { 1892 struct bcm_sysport_priv *priv = netdev_priv(dev); 1893 1894 /* Enable NAPI */ 1895 bcm_sysport_init_dim(priv, bcm_sysport_dim_work); 1896 bcm_sysport_init_rx_coalesce(priv); 1897 napi_enable(&priv->napi); 1898 1899 /* Enable RX interrupt and TX ring full interrupt */ 1900 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); 1901 1902 phy_start(dev->phydev); 1903 1904 /* Enable TX interrupts for the TXQs */ 1905 if (!priv->is_lite) 1906 intrl2_1_mask_clear(priv, 0xffffffff); 1907 else 1908 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); 1909 } 1910 1911 static void rbuf_init(struct bcm_sysport_priv *priv) 1912 { 1913 u32 reg; 1914 1915 reg = rbuf_readl(priv, RBUF_CONTROL); 1916 reg |= RBUF_4B_ALGN | RBUF_RSB_EN; 1917 /* Set a correct RSB format on SYSTEMPORT Lite */ 1918 if (priv->is_lite) 1919 reg &= ~RBUF_RSB_SWAP1; 1920 1921 /* Set a correct RSB format based on host endian */ 1922 if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 1923 reg |= RBUF_RSB_SWAP0; 1924 else 1925 reg &= ~RBUF_RSB_SWAP0; 1926 rbuf_writel(priv, reg, RBUF_CONTROL); 1927 } 1928 1929 static inline void 
bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 4-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Apply features again in case we changed them while interface was
	 * down
	 */
	bcm_sysport_set_features(dev, dev->features);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset housekeeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	netif_tx_start_all_queues(dev);

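	/* Error unwind: the labels below mask the RDMA and TX ring interrupts,
	 * tear down the RX and TX rings, free the requested IRQs and
	 * disconnect from the PHY, undoing the setup performed above.
	 */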
	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}

static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}

static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	if (index < 0)
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

	return 0;
}

static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	/* All filters are already in use, we cannot match more rules */
	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
	    RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	if (index >= RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	/* Location
	 * is the classification ID, and index is the position
	 * within one of our 8 possible filters to be programmed
	 */
	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

	priv->filters_loc[index] = nfc->fs.location;
	set_bit(index, priv->filters);

	return 0;
}

static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
				u64 location)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, location);
	if (index < 0)
		return -EOPNOTSUPP;

	/* No need to disable this filter in hardware here; only the filters
	 * still set in the bitmap get programmed at suspend time by
	 * bcm_sysport_suspend_to_wol()
	 */
	clear_bit(index, priv->filters);
	priv->filters_loc[index] = 0;

	return 0;
}

static int bcm_sysport_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sysport_rule_get(priv, nfc);
		break;
	default:
		break;
	}

	return ret;
}

static int bcm_sysport_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sysport_rule_set(priv, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo = bcm_sysport_get_drvinfo,
	.get_msglevel = bcm_sysport_get_msglvl,
	.set_msglevel = bcm_sysport_set_msglvl,
	.get_link = ethtool_op_get_link,
	.get_strings = bcm_sysport_get_strings,
	.get_ethtool_stats = bcm_sysport_get_stats,
	.get_sset_count = bcm_sysport_get_sset_count,
	.get_wol = bcm_sysport_get_wol,
	.set_wol = bcm_sysport_set_wol,
	.get_coalesce = bcm_sysport_get_coalesce,
	.set_coalesce = bcm_sysport_set_coalesce,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_rxnfc = bcm_sysport_get_rxnfc,
	.set_rxnfc = bcm_sysport_set_rxnfc,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev,
				    select_queue_fallback_t fallback)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return fallback(dev, skb, NULL);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return fallback(dev, skb, NULL);

	return tx_ring->index;
}

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit = bcm_sysport_xmit,
	.ndo_tx_timeout =
			bcm_sysport_tx_timeout,
	.ndo_open = bcm_sysport_open,
	.ndo_stop = bcm_sysport_stop,
	.ndo_set_features = bcm_sysport_set_features,
	.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
	.ndo_set_mac_address = bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64 = bcm_sysport_get_stats64,
	.ndo_select_queue = bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, qp, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We cannot set up queue inspection for switches that are not
	 * directly attached
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
	 * 1:1 mapping, only a 2:1 mapping. By reducing the number of queues
	 * of the per-port (slave_dev) network devices, we achieve just that.
	 * This needs to happen now, before any slave network device is used,
	 * so that it accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
	     q++) {
		ring = &priv->tx_rings[q];

		if (ring->inspect)
			continue;

		/* Just remember the mapping here; the actual programming is
		 * done during bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = qp;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[q + port * num_tx_queues] = ring;
		qp++;
	}

	return 0;
}

static int bcm_sysport_unmap_queues(struct notifier_block *nb,
				    struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	struct net_device *dev;
	unsigned int q, port;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	num_tx_queues = slave_dev->real_num_tx_queues;

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];

		if (ring->switch_port != port)
			continue;

		if (!ring->inspect)
			continue;

		ring->inspect = false;
		priv->ring_map[q + port * num_tx_queues] = NULL;
	}

	return 0;
}

static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void
				    *ptr)
{
	int ret = NOTIFY_DONE;

	switch (event) {
	case DSA_PORT_REGISTER:
		ret = bcm_sysport_map_queues(nb, ptr);
		break;
	case DSA_PORT_UNREGISTER:
		ret = bcm_sysport_unmap_queues(nb, ptr);
		break;
	}

	return notifier_from_errno(ret);
}

#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings)
		return -ENOMEM;

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	unsigned int index, i = 0;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->wolopts & WAKE_FILTER) {
		/* Turn on ACPI matching to steal packets from RBUF */
		reg = rbuf_readl(priv, RBUF_CONTROL);
		if (priv->is_lite)
			reg |= RBUF_ACPI_EN_LITE;
		else
			reg |= RBUF_ACPI_EN;
		rbuf_writel(priv, reg, RBUF_CONTROL);

		/* Enable RXCHK, active filters and Broadcom tag matching */
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
			 RXCHK_BRCM_TAG_MATCH_SHIFT);
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
			i++;
		}
		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		mpd_enable_set(priv, false);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore enabled features */
	bcm_sysport_set_features(dev, dev->features);

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe = bcm_sysport_probe,
	.remove = bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");