/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
{ \
	u32 reg = __raw_readl(priv->base + offset + off); \
	return reg; \
} \
static inline void name##_writel(struct bcm_sysport_priv *priv, \
				 u32 val, u32 off) \
{ \
	__raw_writel(val, priv->base + offset + off); \
} \

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* L2-interrupt masking/unmasking helpers that automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which) \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
					       u32 mask) \
{ \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
	priv->irq##which##_mask &= ~(mask); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
					     u32 mask) \
{ \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
	priv->irq##which##_mask |= (mask); \
} \

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

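/* For reference, the BCM_SYSPORT_INTR_L2(0) instantiation above expands to
 * roughly the following pair of helpers (an illustrative sketch, not the
 * preprocessor output verbatim):
 *
 *	static inline void intrl2_0_mask_clear(struct bcm_sysport_priv *priv,
 *					       u32 mask)
 *	{
 *		intrl2_0_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);
 *		priv->irq0_mask &= ~(mask);
 *	}
 *
 * so callers can unmask L2 interrupt sources without re-reading
 * INTRL2_CPU_MASK_STATUS from the hardware.
 */
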
/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcm_sysport_get_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 243 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 244 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 245 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 246 STAT_MIB_TX("tx_pkts", mib.tx.pkts), 247 STAT_MIB_TX("tx_multicast", mib.tx.mca), 248 STAT_MIB_TX("tx_broadcast", mib.tx.bca), 249 STAT_MIB_TX("tx_pause", mib.tx.pf), 250 STAT_MIB_TX("tx_control", mib.tx.cf), 251 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs), 252 STAT_MIB_TX("tx_oversize", mib.tx.ovr), 253 STAT_MIB_TX("tx_defer", mib.tx.drf), 254 STAT_MIB_TX("tx_excess_defer", mib.tx.edf), 255 STAT_MIB_TX("tx_single_col", mib.tx.scl), 256 STAT_MIB_TX("tx_multi_col", mib.tx.mcl), 257 STAT_MIB_TX("tx_late_col", mib.tx.lcl), 258 STAT_MIB_TX("tx_excess_col", mib.tx.ecl), 259 STAT_MIB_TX("tx_frags", mib.tx.frg), 260 STAT_MIB_TX("tx_total_col", mib.tx.ncl), 261 STAT_MIB_TX("tx_jabber", mib.tx.jbr), 262 STAT_MIB_TX("tx_bytes", mib.tx.bytes), 263 STAT_MIB_TX("tx_good_pkts", mib.tx.pok), 264 STAT_MIB_TX("tx_unicast", mib.tx.uc), 265 /* UniMAC RUNT counters */ 266 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt), 267 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), 268 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), 269 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 270 /* RXCHK misc statistics */ 271 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), 272 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, 273 RXCHK_OTHER_DISC_CNTR), 274 /* RBUF misc statistics */ 275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 277 }; 278 279 #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 280 281 static void bcm_sysport_get_drvinfo(struct net_device *dev, 282 struct ethtool_drvinfo *info) 283 { 284 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 285 strlcpy(info->version, "0.1", sizeof(info->version)); 286 strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); 287 info->n_stats = BCM_SYSPORT_STATS_LEN; 288 } 289 290 static u32 bcm_sysport_get_msglvl(struct net_device *dev) 291 { 292 struct bcm_sysport_priv *priv = netdev_priv(dev); 293 294 return priv->msg_enable; 295 } 296 297 static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) 298 { 299 struct bcm_sysport_priv *priv = netdev_priv(dev); 300 301 priv->msg_enable = enable; 302 } 303 304 static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) 305 { 306 switch (string_set) { 307 case ETH_SS_STATS: 308 return BCM_SYSPORT_STATS_LEN; 309 default: 310 return -EOPNOTSUPP; 311 } 312 } 313 314 static void bcm_sysport_get_strings(struct net_device *dev, 315 u32 stringset, u8 *data) 316 { 317 int i; 318 319 switch (stringset) { 320 case ETH_SS_STATS: 321 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 322 memcpy(data + i * ETH_GSTRING_LEN, 323 bcm_sysport_gstrings_stats[i].stat_string, 324 ETH_GSTRING_LEN); 325 } 326 break; 327 default: 328 break; 329 } 330 } 331 332 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) 333 { 334 int i, j = 0; 335 336 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 337 const struct bcm_sysport_stats *s; 338 u8 offset = 0; 339 u32 val = 0; 340 char *p; 341 342 s = &bcm_sysport_gstrings_stats[i]; 343 switch (s->type) { 344 case BCM_SYSPORT_STAT_NETDEV: 345 continue; 346 case BCM_SYSPORT_STAT_MIB_RX: 347 case BCM_SYSPORT_STAT_MIB_TX: 348 case BCM_SYSPORT_STAT_RUNT: 349 if (s->type 
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	dma_addr_t mapping;
	int ret;

	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!cb->skb) {
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(kdev, cb->skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcm_sysport_free_cb(cb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DESC_SIZE);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	return 0;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	int ret = 0;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcm_sysport_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

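/* Note on the RX producer index arithmetic in bcm_sysport_desc_rx() below:
 * the hardware producer index is a free-running counter that wraps at
 * RDMA_CONS_INDEX_MASK + 1, so the distance to the software consumer index
 * must account for the wrap. Sketch, assuming a 0xffff mask: with
 * rx_c_index = 65530 and p_index = 4, to_process is
 * (0xffff + 1) - 65530 + 4 = 10 packets.
 */
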
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;

		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto refill;
		}

		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip all of
		 * this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
refill:
		bcm_sysport_rx_refill(priv, cb);
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));
	}

	return 0;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

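/* Worked example for the checksum offsets computed in
 * bcm_sysport_insert_tsb() above (a sketch, assuming a plain TCP/IPv4 frame
 * with a 14-byte Ethernet header and a 20-byte IPv4 header): once the 8-byte
 * TSB has been pushed, skb_checksum_start_offset() is 8 + 14 + 20 = 42, so
 * csum_start = 42 - 8 = 34 and the L4 checksum pointer written into the TSB
 * is csum_start + skb->csum_offset = 34 + 16 = 50.
 */
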
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
			DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}

	phy_print_status(priv->phydev);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	u32 reg;
	int ret;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				      0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
netdev_err(dev, "failed to request TX interrupt\n"); 1486 goto out_free_irq0; 1487 } 1488 1489 /* Initialize both hardware and software ring */ 1490 for (i = 0; i < dev->num_tx_queues; i++) { 1491 ret = bcm_sysport_init_tx_ring(priv, i); 1492 if (ret) { 1493 netdev_err(dev, "failed to initialize TX ring %d\n", 1494 i); 1495 goto out_free_tx_ring; 1496 } 1497 } 1498 1499 /* Initialize linked-list */ 1500 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); 1501 1502 /* Initialize RX ring */ 1503 ret = bcm_sysport_init_rx_ring(priv); 1504 if (ret) { 1505 netdev_err(dev, "failed to initialize RX ring\n"); 1506 goto out_free_rx_ring; 1507 } 1508 1509 /* Turn on RDMA */ 1510 ret = rdma_enable_set(priv, 1); 1511 if (ret) 1512 goto out_free_rx_ring; 1513 1514 /* Turn on TDMA */ 1515 ret = tdma_enable_set(priv, 1); 1516 if (ret) 1517 goto out_clear_rx_int; 1518 1519 /* Turn on UniMAC TX/RX */ 1520 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1); 1521 1522 bcm_sysport_netif_start(dev); 1523 1524 return 0; 1525 1526 out_clear_rx_int: 1527 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); 1528 out_free_rx_ring: 1529 bcm_sysport_fini_rx_ring(priv); 1530 out_free_tx_ring: 1531 for (i = 0; i < dev->num_tx_queues; i++) 1532 bcm_sysport_fini_tx_ring(priv, i); 1533 free_irq(priv->irq1, dev); 1534 out_free_irq0: 1535 free_irq(priv->irq0, dev); 1536 out_phy_disconnect: 1537 phy_disconnect(priv->phydev); 1538 return ret; 1539 } 1540 1541 static void bcm_sysport_netif_stop(struct net_device *dev) 1542 { 1543 struct bcm_sysport_priv *priv = netdev_priv(dev); 1544 1545 /* stop all software from updating hardware */ 1546 netif_tx_stop_all_queues(dev); 1547 napi_disable(&priv->napi); 1548 phy_stop(priv->phydev); 1549 1550 /* mask all interrupts */ 1551 intrl2_0_mask_set(priv, 0xffffffff); 1552 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1553 intrl2_1_mask_set(priv, 0xffffffff); 1554 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1555 } 1556 1557 static int bcm_sysport_stop(struct net_device *dev) 1558 { 1559 struct bcm_sysport_priv *priv = netdev_priv(dev); 1560 unsigned int i; 1561 int ret; 1562 1563 bcm_sysport_netif_stop(dev); 1564 1565 /* Disable UniMAC RX */ 1566 umac_enable_set(priv, CMD_RX_EN, 0); 1567 1568 ret = tdma_enable_set(priv, 0); 1569 if (ret) { 1570 netdev_err(dev, "timeout disabling RDMA\n"); 1571 return ret; 1572 } 1573 1574 /* Wait for a maximum packet size to be drained */ 1575 usleep_range(2000, 3000); 1576 1577 ret = rdma_enable_set(priv, 0); 1578 if (ret) { 1579 netdev_err(dev, "timeout disabling TDMA\n"); 1580 return ret; 1581 } 1582 1583 /* Disable UniMAC TX */ 1584 umac_enable_set(priv, CMD_TX_EN, 0); 1585 1586 /* Free RX/TX rings SW structures */ 1587 for (i = 0; i < dev->num_tx_queues; i++) 1588 bcm_sysport_fini_tx_ring(priv, i); 1589 bcm_sysport_fini_rx_ring(priv); 1590 1591 free_irq(priv->irq0, dev); 1592 free_irq(priv->irq1, dev); 1593 1594 /* Disconnect from PHY */ 1595 phy_disconnect(priv->phydev); 1596 1597 return 0; 1598 } 1599 1600 static struct ethtool_ops bcm_sysport_ethtool_ops = { 1601 .get_settings = bcm_sysport_get_settings, 1602 .set_settings = bcm_sysport_set_settings, 1603 .get_drvinfo = bcm_sysport_get_drvinfo, 1604 .get_msglevel = bcm_sysport_get_msglvl, 1605 .set_msglevel = bcm_sysport_set_msglvl, 1606 .get_link = ethtool_op_get_link, 1607 .get_strings = bcm_sysport_get_strings, 1608 .get_ethtool_stats = bcm_sysport_get_stats, 1609 .get_sset_count = bcm_sysport_get_sset_count, 1610 .get_wol = bcm_sysport_get_wol, 1611 .set_wol = 
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit = bcm_sysport_xmit,
	.ndo_tx_timeout = bcm_sysport_tx_timeout,
	.ndo_open = bcm_sysport_open,
	.ndo_stop = bcm_sysport_stop,
	.ndo_set_features = bcm_sysport_set_features,
	.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
};

#define REV_FMT	"v%2x.%02x"

static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		random_ether_addr(dev->dev_addr);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;
err:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK if it was previously enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(priv->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};

static struct platform_driver bcm_sysport_driver = {
	.probe = bcm_sysport_probe,
	.remove = bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.owner = THIS_MODULE,
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");