/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O register accessor helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset)				\
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				  u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* L2-interrupt masking/unmasking helpers; they automatically save the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
	priv->irq##which##_mask &= ~(mask);				\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
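/* For reference: BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) expands to
 * umac_readl()/umac_writel() helpers that take block-relative offsets (e.g.
 * UMAC_CMD), and the BCM_SYSPORT_INTR_L2() helpers additionally mirror the
 * applied interrupt mask in priv->irq0_mask/priv->irq1_mask so hot paths do
 * not need to read CPU_MASK_STATUS back from the interrupt controller.
 */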

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bit addresses explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcm_sysport_get_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g. using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_sysport_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}
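/* Note: bcm_sysport_update_mib_counters() below walks the table above in
 * declaration order, accumulating each entry's stat_sizeof into 'j' to derive
 * the hardware offset from UMAC_MIB_START; non-RX UniMAC counters (TX and
 * RUNT) sit a further UMAC_MIB_STAT_OFFSET into the block.  The RXCHK and
 * RBUF discard counters are read directly and cleared once they read back as
 * all-ones (saturated).
 */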

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125 MHz; the DMA timeout is this reference
	 * clock divided by 1024, which yields roughly 8.192 us.  Our maximum
	 * value has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
		reg &= ~(RING_INTR_THRESH_MASK |
			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
		reg |= ec->tx_max_coalesced_frames;
		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			RING_TIMEOUT_SHIFT;
		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
	}

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= ec->rx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
		RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);

	return 0;
}
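/* Worked example for the conversion above (illustrative): with the 125 MHz
 * reference clock divided by 1024, one timeout tick is ~8.192 us, so a
 * requested tx_coalesce_usecs of 100 programs DIV_ROUND_UP(100 * 1000, 8192)
 * = 13 ticks, i.e. an effective interrupt timeout of roughly 106 us.
 */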

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
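/* RX buffer strategy (summary of bcm_sysport_rx_refill() above): a
 * replacement SKB is allocated and DMA-mapped before the filled one is
 * handed back, so every ring slot always keeps a valid buffer; if the
 * allocation or mapping fails, the current buffer stays on the ring and the
 * incoming packet is simply dropped by the caller.
 */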

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	return processed;
}
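/* Worked example for the wrap-around computation at the top of
 * bcm_sysport_desc_rx() (illustrative, assuming a 16-bit index space as
 * suggested by RDMA_CONS_INDEX_MASK): with rx_c_index == 65530 and a
 * producer index that has wrapped to 4, to_process = 65536 - 65530 + 4 = 10.
 */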

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	return IRQ_HANDLED;
}
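/* Interrupt layout, as used by the handlers above and below: INTRL2_0 carries
 * the RX "buffer done" event, the TX-ring-full event and the MagicPacket
 * wake event, while INTRL2_1 exposes one status bit per TX queue, which is
 * why the TX handler walks dev->num_tx_queues and schedules the matching
 * per-ring NAPI context.
 */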

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* The ISRs expect the net_device as dev_id, matching request_irq() */
	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, dev);
	enable_irq(priv->irq0);

	disable_irq(priv->irq1);
	bcm_sysport_tx_isr(priv->irq1, dev);
	enable_irq(priv->irq1);
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}
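/* Note on the Transmit Status Block built above: it is the 8-byte header
 * prepended in headroom when TX checksum offload is enabled (TSB_EN); the
 * csum_info word packs the offset at which the computed checksum must be
 * stored (checksum start + skb->csum_offset) in its low bits and the
 * checksum start offset above L4_PTR_SHIFT, both relative to the frame that
 * follows the TSB, hence the sizeof(*tsb) subtraction after skb_push().
 */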

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS), otherwise they will be discarded
	 * when they enter the switch port logic. When Broadcom tags are
	 * enabled, we need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done
	 * after the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		      DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}

	phy_print_status(priv->phydev);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		/* Do not leak the coherent descriptor on failure */
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  p, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}
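/* Design note (per the comments above): each TX ring keeps a single coherent
 * "bounce" descriptor; writing it through the per-ring WRITE_PORT registers
 * makes the hardware copy it into its internal linked-list, so the host only
 * tracks in-flight buffers through ring->cbs and the desc_count/curr_desc
 * software indices.
 */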

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}
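/* Note on the RX descriptors set up above: priv->rx_bds points into the RDMA
 * block of the device register space (priv->base + SYS_PORT_RDMA_OFFSET),
 * i.e. RX descriptors live on-chip rather than in a coherent DMA allocation,
 * which is why dma_desc_set_addr() programs them with __raw_writel().
 */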

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				      0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset housekeeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(priv->phydev);
	return ret;
}
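/* Bring-up ordering in bcm_sysport_open() above: UniMAC is reset and kept
 * disabled while the rings are initialized, RDMA and TDMA are enabled next,
 * and only then are the UniMAC receiver and transmitter turned on, so frames
 * are not presented to a DMA engine that is not yet ready.
 */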

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(priv->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_settings		= bcm_sysport_get_settings,
	.set_settings		= bcm_sysport_set_settings,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
};

#define REV_FMT	"v%2x.%02x"

static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;
err:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}
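/* Suspend/resume and Wake-on-LAN flow (summary of the code below): when
 * WAKE_MAGIC or WAKE_MAGICSECURE is requested through ethtool, suspend arms
 * the UniMAC MagicPacket detector (MPD_EN, plus PSW_EN for the SecureOn
 * password) and unmasks INTRL2_0_MPD; that interrupt or the dedicated
 * wol_irq wakes the system, and resume clears the detector again through
 * bcm_sysport_resume_from_wol().
 */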

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(priv->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");