1 /* 2 * Broadcom BCM7xxx System Port Ethernet MAC driver 3 * 4 * Copyright (C) 2014 Broadcom Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/module.h> 16 #include <linux/kernel.h> 17 #include <linux/netdevice.h> 18 #include <linux/etherdevice.h> 19 #include <linux/platform_device.h> 20 #include <linux/of.h> 21 #include <linux/of_net.h> 22 #include <linux/of_mdio.h> 23 #include <linux/phy.h> 24 #include <linux/phy_fixed.h> 25 #include <net/dsa.h> 26 #include <net/ip.h> 27 #include <net/ipv6.h> 28 29 #include "bcmsysport.h" 30 31 /* I/O accessors register helpers */ 32 #define BCM_SYSPORT_IO_MACRO(name, offset) \ 33 static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \ 34 { \ 35 u32 reg = __raw_readl(priv->base + offset + off); \ 36 return reg; \ 37 } \ 38 static inline void name##_writel(struct bcm_sysport_priv *priv, \ 39 u32 val, u32 off) \ 40 { \ 41 __raw_writel(val, priv->base + offset + off); \ 42 } \ 43 44 BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); 45 BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET); 46 BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET); 47 BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET); 48 BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET); 49 BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET); 50 BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET); 51 BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET); 52 BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET); 53 BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); 54 55 /* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact 56 * same layout, except it has been moved by 4 bytes up, *sigh* 57 */ 58 static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off) 59 { 60 if (priv->is_lite && off >= RDMA_STATUS) 61 off += 4; 62 return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off); 63 } 64 65 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off) 66 { 67 if (priv->is_lite && off >= RDMA_STATUS) 68 off += 4; 69 __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off); 70 } 71 72 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit) 73 { 74 if (!priv->is_lite) { 75 return BIT(bit); 76 } else { 77 if (bit >= ACB_ALGO) 78 return BIT(bit + 1); 79 else 80 return BIT(bit); 81 } 82 } 83 84 /* L2-interrupt masking/unmasking helpers, does automatic saving of the applied 85 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. 86 */ 87 #define BCM_SYSPORT_INTR_L2(which) \ 88 static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \ 89 u32 mask) \ 90 { \ 91 priv->irq##which##_mask &= ~(mask); \ 92 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ 93 } \ 94 static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \ 95 u32 mask) \ 96 { \ 97 intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \ 98 priv->irq##which##_mask |= (mask); \ 99 } \ 100 101 BCM_SYSPORT_INTR_L2(0) 102 BCM_SYSPORT_INTR_L2(1) 103 104 /* Register accesses to GISB/RBUS registers are expensive (few hundred 105 * nanoseconds), so keep the check for 64-bits explicit here to save 106 * one register write per-packet on 32-bits platforms. 
107 */ 108 static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv, 109 void __iomem *d, 110 dma_addr_t addr) 111 { 112 #ifdef CONFIG_PHYS_ADDR_T_64BIT 113 __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK, 114 d + DESC_ADDR_HI_STATUS_LEN); 115 #endif 116 __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO); 117 } 118 119 static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, 120 struct dma_desc *desc, 121 unsigned int port) 122 { 123 /* Ports are latched, so write upper address first */ 124 tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); 125 tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port)); 126 } 127 128 /* Ethtool operations */ 129 static int bcm_sysport_set_rx_csum(struct net_device *dev, 130 netdev_features_t wanted) 131 { 132 struct bcm_sysport_priv *priv = netdev_priv(dev); 133 u32 reg; 134 135 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); 136 reg = rxchk_readl(priv, RXCHK_CONTROL); 137 if (priv->rx_chk_en) 138 reg |= RXCHK_EN; 139 else 140 reg &= ~RXCHK_EN; 141 142 /* If UniMAC forwards CRC, we need to skip over it to get 143 * a valid CHK bit to be set in the per-packet status word 144 */ 145 if (priv->rx_chk_en && priv->crc_fwd) 146 reg |= RXCHK_SKIP_FCS; 147 else 148 reg &= ~RXCHK_SKIP_FCS; 149 150 /* If Broadcom tags are enabled (e.g: using a switch), make 151 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom 152 * tag after the Ethernet MAC Source Address. 153 */ 154 if (netdev_uses_dsa(dev)) 155 reg |= RXCHK_BRCM_TAG_EN; 156 else 157 reg &= ~RXCHK_BRCM_TAG_EN; 158 159 rxchk_writel(priv, reg, RXCHK_CONTROL); 160 161 return 0; 162 } 163 164 static int bcm_sysport_set_tx_csum(struct net_device *dev, 165 netdev_features_t wanted) 166 { 167 struct bcm_sysport_priv *priv = netdev_priv(dev); 168 u32 reg; 169 170 /* Hardware transmit checksum requires us to enable the Transmit status 171 * block prepended to the packet contents 172 */ 173 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 174 reg = tdma_readl(priv, TDMA_CONTROL); 175 if (priv->tsb_en) 176 reg |= tdma_control_bit(priv, TSB_EN); 177 else 178 reg &= ~tdma_control_bit(priv, TSB_EN); 179 tdma_writel(priv, reg, TDMA_CONTROL); 180 181 return 0; 182 } 183 184 static int bcm_sysport_set_features(struct net_device *dev, 185 netdev_features_t features) 186 { 187 netdev_features_t changed = features ^ dev->features; 188 netdev_features_t wanted = dev->wanted_features; 189 int ret = 0; 190 191 if (changed & NETIF_F_RXCSUM) 192 ret = bcm_sysport_set_rx_csum(dev, wanted); 193 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) 194 ret = bcm_sysport_set_tx_csum(dev, wanted); 195 196 return ret; 197 } 198 199 /* Hardware counters must be kept in sync because the order/offset 200 * is important here (order in structure declaration = order in hardware) 201 */ 202 static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { 203 /* general stats */ 204 STAT_NETDEV(rx_packets), 205 STAT_NETDEV(tx_packets), 206 STAT_NETDEV(rx_bytes), 207 STAT_NETDEV(tx_bytes), 208 STAT_NETDEV(rx_errors), 209 STAT_NETDEV(tx_errors), 210 STAT_NETDEV(rx_dropped), 211 STAT_NETDEV(tx_dropped), 212 STAT_NETDEV(multicast), 213 /* UniMAC RSV counters */ 214 STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), 215 STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), 216 STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), 217 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), 218 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), 219 
STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), 220 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), 221 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), 222 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), 223 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), 224 STAT_MIB_RX("rx_pkts", mib.rx.pkt), 225 STAT_MIB_RX("rx_bytes", mib.rx.bytes), 226 STAT_MIB_RX("rx_multicast", mib.rx.mca), 227 STAT_MIB_RX("rx_broadcast", mib.rx.bca), 228 STAT_MIB_RX("rx_fcs", mib.rx.fcs), 229 STAT_MIB_RX("rx_control", mib.rx.cf), 230 STAT_MIB_RX("rx_pause", mib.rx.pf), 231 STAT_MIB_RX("rx_unknown", mib.rx.uo), 232 STAT_MIB_RX("rx_align", mib.rx.aln), 233 STAT_MIB_RX("rx_outrange", mib.rx.flr), 234 STAT_MIB_RX("rx_code", mib.rx.cde), 235 STAT_MIB_RX("rx_carrier", mib.rx.fcr), 236 STAT_MIB_RX("rx_oversize", mib.rx.ovr), 237 STAT_MIB_RX("rx_jabber", mib.rx.jbr), 238 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue), 239 STAT_MIB_RX("rx_good_pkts", mib.rx.pok), 240 STAT_MIB_RX("rx_unicast", mib.rx.uc), 241 STAT_MIB_RX("rx_ppp", mib.rx.ppp), 242 STAT_MIB_RX("rx_crc", mib.rx.rcrc), 243 /* UniMAC TSV counters */ 244 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), 245 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), 246 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), 247 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), 248 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), 249 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), 250 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 251 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 252 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 253 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 254 STAT_MIB_TX("tx_pkts", mib.tx.pkts), 255 STAT_MIB_TX("tx_multicast", mib.tx.mca), 256 STAT_MIB_TX("tx_broadcast", mib.tx.bca), 257 STAT_MIB_TX("tx_pause", mib.tx.pf), 258 STAT_MIB_TX("tx_control", mib.tx.cf), 259 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs), 260 STAT_MIB_TX("tx_oversize", mib.tx.ovr), 261 STAT_MIB_TX("tx_defer", mib.tx.drf), 262 STAT_MIB_TX("tx_excess_defer", mib.tx.edf), 263 STAT_MIB_TX("tx_single_col", mib.tx.scl), 264 STAT_MIB_TX("tx_multi_col", mib.tx.mcl), 265 STAT_MIB_TX("tx_late_col", mib.tx.lcl), 266 STAT_MIB_TX("tx_excess_col", mib.tx.ecl), 267 STAT_MIB_TX("tx_frags", mib.tx.frg), 268 STAT_MIB_TX("tx_total_col", mib.tx.ncl), 269 STAT_MIB_TX("tx_jabber", mib.tx.jbr), 270 STAT_MIB_TX("tx_bytes", mib.tx.bytes), 271 STAT_MIB_TX("tx_good_pkts", mib.tx.pok), 272 STAT_MIB_TX("tx_unicast", mib.tx.uc), 273 /* UniMAC RUNT counters */ 274 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt), 275 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), 276 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), 277 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 278 /* RXCHK misc statistics */ 279 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), 280 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, 281 RXCHK_OTHER_DISC_CNTR), 282 /* RBUF misc statistics */ 283 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 284 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 285 STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 286 STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), 287 STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), 288 /* Per TX-queue statistics are dynamically appended */ 289 }; 290 291 #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 292 293 static void 
bcm_sysport_get_drvinfo(struct net_device *dev, 294 struct ethtool_drvinfo *info) 295 { 296 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 297 strlcpy(info->version, "0.1", sizeof(info->version)); 298 strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); 299 } 300 301 static u32 bcm_sysport_get_msglvl(struct net_device *dev) 302 { 303 struct bcm_sysport_priv *priv = netdev_priv(dev); 304 305 return priv->msg_enable; 306 } 307 308 static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) 309 { 310 struct bcm_sysport_priv *priv = netdev_priv(dev); 311 312 priv->msg_enable = enable; 313 } 314 315 static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) 316 { 317 switch (type) { 318 case BCM_SYSPORT_STAT_NETDEV: 319 case BCM_SYSPORT_STAT_RXCHK: 320 case BCM_SYSPORT_STAT_RBUF: 321 case BCM_SYSPORT_STAT_SOFT: 322 return true; 323 default: 324 return false; 325 } 326 } 327 328 static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) 329 { 330 struct bcm_sysport_priv *priv = netdev_priv(dev); 331 const struct bcm_sysport_stats *s; 332 unsigned int i, j; 333 334 switch (string_set) { 335 case ETH_SS_STATS: 336 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 337 s = &bcm_sysport_gstrings_stats[i]; 338 if (priv->is_lite && 339 !bcm_sysport_lite_stat_valid(s->type)) 340 continue; 341 j++; 342 } 343 /* Include per-queue statistics */ 344 return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; 345 default: 346 return -EOPNOTSUPP; 347 } 348 } 349 350 static void bcm_sysport_get_strings(struct net_device *dev, 351 u32 stringset, u8 *data) 352 { 353 struct bcm_sysport_priv *priv = netdev_priv(dev); 354 const struct bcm_sysport_stats *s; 355 char buf[128]; 356 int i, j; 357 358 switch (stringset) { 359 case ETH_SS_STATS: 360 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 361 s = &bcm_sysport_gstrings_stats[i]; 362 if (priv->is_lite && 363 !bcm_sysport_lite_stat_valid(s->type)) 364 continue; 365 366 memcpy(data + j * ETH_GSTRING_LEN, s->stat_string, 367 ETH_GSTRING_LEN); 368 j++; 369 } 370 371 for (i = 0; i < dev->num_tx_queues; i++) { 372 snprintf(buf, sizeof(buf), "txq%d_packets", i); 373 memcpy(data + j * ETH_GSTRING_LEN, buf, 374 ETH_GSTRING_LEN); 375 j++; 376 377 snprintf(buf, sizeof(buf), "txq%d_bytes", i); 378 memcpy(data + j * ETH_GSTRING_LEN, buf, 379 ETH_GSTRING_LEN); 380 j++; 381 } 382 break; 383 default: 384 break; 385 } 386 } 387 388 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) 389 { 390 int i, j = 0; 391 392 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 393 const struct bcm_sysport_stats *s; 394 u8 offset = 0; 395 u32 val = 0; 396 char *p; 397 398 s = &bcm_sysport_gstrings_stats[i]; 399 switch (s->type) { 400 case BCM_SYSPORT_STAT_NETDEV: 401 case BCM_SYSPORT_STAT_SOFT: 402 continue; 403 case BCM_SYSPORT_STAT_MIB_RX: 404 case BCM_SYSPORT_STAT_MIB_TX: 405 case BCM_SYSPORT_STAT_RUNT: 406 if (priv->is_lite) 407 continue; 408 409 if (s->type != BCM_SYSPORT_STAT_MIB_RX) 410 offset = UMAC_MIB_STAT_OFFSET; 411 val = umac_readl(priv, UMAC_MIB_START + j + offset); 412 break; 413 case BCM_SYSPORT_STAT_RXCHK: 414 val = rxchk_readl(priv, s->reg_offset); 415 if (val == ~0) 416 rxchk_writel(priv, 0, s->reg_offset); 417 break; 418 case BCM_SYSPORT_STAT_RBUF: 419 val = rbuf_readl(priv, s->reg_offset); 420 if (val == ~0) 421 rbuf_writel(priv, 0, s->reg_offset); 422 break; 423 } 424 425 j += s->stat_sizeof; 426 p = (char *)priv + s->stat_offset; 427 *(u32 *)p = val; 428 } 429 430 netif_dbg(priv, 
hw, priv->netdev, "updated MIB counters\n"); 431 } 432 433 static void bcm_sysport_get_stats(struct net_device *dev, 434 struct ethtool_stats *stats, u64 *data) 435 { 436 struct bcm_sysport_priv *priv = netdev_priv(dev); 437 struct bcm_sysport_tx_ring *ring; 438 int i, j; 439 440 if (netif_running(dev)) 441 bcm_sysport_update_mib_counters(priv); 442 443 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 444 const struct bcm_sysport_stats *s; 445 char *p; 446 447 s = &bcm_sysport_gstrings_stats[i]; 448 if (s->type == BCM_SYSPORT_STAT_NETDEV) 449 p = (char *)&dev->stats; 450 else 451 p = (char *)priv; 452 p += s->stat_offset; 453 data[j] = *(unsigned long *)p; 454 j++; 455 } 456 457 /* For SYSTEMPORT Lite since we have holes in our statistics, j would 458 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it 459 * needs to point to how many total statistics we have minus the 460 * number of per TX queue statistics 461 */ 462 j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) - 463 dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; 464 465 for (i = 0; i < dev->num_tx_queues; i++) { 466 ring = &priv->tx_rings[i]; 467 data[j] = ring->packets; 468 j++; 469 data[j] = ring->bytes; 470 j++; 471 } 472 } 473 474 static void bcm_sysport_get_wol(struct net_device *dev, 475 struct ethtool_wolinfo *wol) 476 { 477 struct bcm_sysport_priv *priv = netdev_priv(dev); 478 u32 reg; 479 480 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; 481 wol->wolopts = priv->wolopts; 482 483 if (!(priv->wolopts & WAKE_MAGICSECURE)) 484 return; 485 486 /* Return the programmed SecureOn password */ 487 reg = umac_readl(priv, UMAC_PSW_MS); 488 put_unaligned_be16(reg, &wol->sopass[0]); 489 reg = umac_readl(priv, UMAC_PSW_LS); 490 put_unaligned_be32(reg, &wol->sopass[2]); 491 } 492 493 static int bcm_sysport_set_wol(struct net_device *dev, 494 struct ethtool_wolinfo *wol) 495 { 496 struct bcm_sysport_priv *priv = netdev_priv(dev); 497 struct device *kdev = &priv->pdev->dev; 498 u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE; 499 500 if (!device_can_wakeup(kdev)) 501 return -ENOTSUPP; 502 503 if (wol->wolopts & ~supported) 504 return -EINVAL; 505 506 /* Program the SecureOn password */ 507 if (wol->wolopts & WAKE_MAGICSECURE) { 508 umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), 509 UMAC_PSW_MS); 510 umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), 511 UMAC_PSW_LS); 512 } 513 514 /* Flag the device and relevant IRQ as wakeup capable */ 515 if (wol->wolopts) { 516 device_set_wakeup_enable(kdev, 1); 517 if (priv->wol_irq_disabled) 518 enable_irq_wake(priv->wol_irq); 519 priv->wol_irq_disabled = 0; 520 } else { 521 device_set_wakeup_enable(kdev, 0); 522 /* Avoid unbalanced disable_irq_wake calls */ 523 if (!priv->wol_irq_disabled) 524 disable_irq_wake(priv->wol_irq); 525 priv->wol_irq_disabled = 1; 526 } 527 528 priv->wolopts = wol->wolopts; 529 530 return 0; 531 } 532 533 static int bcm_sysport_get_coalesce(struct net_device *dev, 534 struct ethtool_coalesce *ec) 535 { 536 struct bcm_sysport_priv *priv = netdev_priv(dev); 537 u32 reg; 538 539 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0)); 540 541 ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000; 542 ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK; 543 544 reg = rdma_readl(priv, RDMA_MBDONE_INTR); 545 546 ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000; 547 ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK; 548 549 return 0; 550 } 551 552 static int bcm_sysport_set_coalesce(struct net_device *dev, 553 
struct ethtool_coalesce *ec) 554 { 555 struct bcm_sysport_priv *priv = netdev_priv(dev); 556 unsigned int i; 557 u32 reg; 558 559 /* Base system clock is 125Mhz, DMA timeout is this reference clock 560 * divided by 1024, which yield roughly 8.192 us, our maximum value has 561 * to fit in the RING_TIMEOUT_MASK (16 bits). 562 */ 563 if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK || 564 ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 || 565 ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK || 566 ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1) 567 return -EINVAL; 568 569 if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) || 570 (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)) 571 return -EINVAL; 572 573 for (i = 0; i < dev->num_tx_queues; i++) { 574 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i)); 575 reg &= ~(RING_INTR_THRESH_MASK | 576 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT); 577 reg |= ec->tx_max_coalesced_frames; 578 reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) << 579 RING_TIMEOUT_SHIFT; 580 tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i)); 581 } 582 583 reg = rdma_readl(priv, RDMA_MBDONE_INTR); 584 reg &= ~(RDMA_INTR_THRESH_MASK | 585 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT); 586 reg |= ec->rx_max_coalesced_frames; 587 reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) << 588 RDMA_TIMEOUT_SHIFT; 589 rdma_writel(priv, reg, RDMA_MBDONE_INTR); 590 591 return 0; 592 } 593 594 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) 595 { 596 dev_kfree_skb_any(cb->skb); 597 cb->skb = NULL; 598 dma_unmap_addr_set(cb, dma_addr, 0); 599 } 600 601 static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, 602 struct bcm_sysport_cb *cb) 603 { 604 struct device *kdev = &priv->pdev->dev; 605 struct net_device *ndev = priv->netdev; 606 struct sk_buff *skb, *rx_skb; 607 dma_addr_t mapping; 608 609 /* Allocate a new SKB for a new packet */ 610 skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); 611 if (!skb) { 612 priv->mib.alloc_rx_buff_failed++; 613 netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); 614 return NULL; 615 } 616 617 mapping = dma_map_single(kdev, skb->data, 618 RX_BUF_LENGTH, DMA_FROM_DEVICE); 619 if (dma_mapping_error(kdev, mapping)) { 620 priv->mib.rx_dma_failed++; 621 dev_kfree_skb_any(skb); 622 netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); 623 return NULL; 624 } 625 626 /* Grab the current SKB on the ring */ 627 rx_skb = cb->skb; 628 if (likely(rx_skb)) 629 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 630 RX_BUF_LENGTH, DMA_FROM_DEVICE); 631 632 /* Put the new SKB on the ring */ 633 cb->skb = skb; 634 dma_unmap_addr_set(cb, dma_addr, mapping); 635 dma_desc_set_addr(priv, cb->bd_addr, mapping); 636 637 netif_dbg(priv, rx_status, ndev, "RX refill\n"); 638 639 /* Return the current SKB to the caller */ 640 return rx_skb; 641 } 642 643 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) 644 { 645 struct bcm_sysport_cb *cb; 646 struct sk_buff *skb; 647 unsigned int i; 648 649 for (i = 0; i < priv->num_rx_bds; i++) { 650 cb = &priv->rx_cbs[i]; 651 skb = bcm_sysport_rx_refill(priv, cb); 652 if (skb) 653 dev_kfree_skb(skb); 654 if (!cb->skb) 655 return -ENOMEM; 656 } 657 658 return 0; 659 } 660 661 /* Poll the hardware for up to budget packets to process */ 662 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, 663 unsigned int budget) 664 { 665 struct net_device *ndev = priv->netdev; 666 unsigned int processed = 0, to_process; 667 
struct bcm_sysport_cb *cb; 668 struct sk_buff *skb; 669 unsigned int p_index; 670 u16 len, status; 671 struct bcm_rsb *rsb; 672 673 /* Clear status before servicing to reduce spurious interrupts */ 674 intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR); 675 676 /* Determine how much we should process since last call, SYSTEMPORT Lite 677 * groups the producer and consumer indexes into the same 32-bit 678 * which we access using RDMA_CONS_INDEX 679 */ 680 if (!priv->is_lite) 681 p_index = rdma_readl(priv, RDMA_PROD_INDEX); 682 else 683 p_index = rdma_readl(priv, RDMA_CONS_INDEX); 684 p_index &= RDMA_PROD_INDEX_MASK; 685 686 to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK; 687 688 netif_dbg(priv, rx_status, ndev, 689 "p_index=%d rx_c_index=%d to_process=%d\n", 690 p_index, priv->rx_c_index, to_process); 691 692 while ((processed < to_process) && (processed < budget)) { 693 cb = &priv->rx_cbs[priv->rx_read_ptr]; 694 skb = bcm_sysport_rx_refill(priv, cb); 695 696 697 /* We do not have a backing SKB, so we do not a corresponding 698 * DMA mapping for this incoming packet since 699 * bcm_sysport_rx_refill always either has both skb and mapping 700 * or none. 701 */ 702 if (unlikely(!skb)) { 703 netif_err(priv, rx_err, ndev, "out of memory!\n"); 704 ndev->stats.rx_dropped++; 705 ndev->stats.rx_errors++; 706 goto next; 707 } 708 709 /* Extract the Receive Status Block prepended */ 710 rsb = (struct bcm_rsb *)skb->data; 711 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; 712 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & 713 DESC_STATUS_MASK; 714 715 netif_dbg(priv, rx_status, ndev, 716 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", 717 p_index, priv->rx_c_index, priv->rx_read_ptr, 718 len, status); 719 720 if (unlikely(len > RX_BUF_LENGTH)) { 721 netif_err(priv, rx_status, ndev, "oversized packet\n"); 722 ndev->stats.rx_length_errors++; 723 ndev->stats.rx_errors++; 724 dev_kfree_skb_any(skb); 725 goto next; 726 } 727 728 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { 729 netif_err(priv, rx_status, ndev, "fragmented packet!\n"); 730 ndev->stats.rx_dropped++; 731 ndev->stats.rx_errors++; 732 dev_kfree_skb_any(skb); 733 goto next; 734 } 735 736 if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { 737 netif_err(priv, rx_err, ndev, "error packet\n"); 738 if (status & RX_STATUS_OVFLOW) 739 ndev->stats.rx_over_errors++; 740 ndev->stats.rx_dropped++; 741 ndev->stats.rx_errors++; 742 dev_kfree_skb_any(skb); 743 goto next; 744 } 745 746 skb_put(skb, len); 747 748 /* Hardware validated our checksum */ 749 if (likely(status & DESC_L4_CSUM)) 750 skb->ip_summed = CHECKSUM_UNNECESSARY; 751 752 /* Hardware pre-pends packets with 2bytes before Ethernet 753 * header plus we have the Receive Status Block, strip off all 754 * of this from the SKB. 
755 */ 756 skb_pull(skb, sizeof(*rsb) + 2); 757 len -= (sizeof(*rsb) + 2); 758 759 /* UniMAC may forward CRC */ 760 if (priv->crc_fwd) { 761 skb_trim(skb, len - ETH_FCS_LEN); 762 len -= ETH_FCS_LEN; 763 } 764 765 skb->protocol = eth_type_trans(skb, ndev); 766 ndev->stats.rx_packets++; 767 ndev->stats.rx_bytes += len; 768 769 napi_gro_receive(&priv->napi, skb); 770 next: 771 processed++; 772 priv->rx_read_ptr++; 773 774 if (priv->rx_read_ptr == priv->num_rx_bds) 775 priv->rx_read_ptr = 0; 776 } 777 778 return processed; 779 } 780 781 static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, 782 struct bcm_sysport_cb *cb, 783 unsigned int *bytes_compl, 784 unsigned int *pkts_compl) 785 { 786 struct bcm_sysport_priv *priv = ring->priv; 787 struct device *kdev = &priv->pdev->dev; 788 789 if (cb->skb) { 790 ring->bytes += cb->skb->len; 791 *bytes_compl += cb->skb->len; 792 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 793 dma_unmap_len(cb, dma_len), 794 DMA_TO_DEVICE); 795 ring->packets++; 796 (*pkts_compl)++; 797 bcm_sysport_free_cb(cb); 798 /* SKB fragment */ 799 } else if (dma_unmap_addr(cb, dma_addr)) { 800 ring->bytes += dma_unmap_len(cb, dma_len); 801 dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), 802 dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); 803 dma_unmap_addr_set(cb, dma_addr, 0); 804 } 805 } 806 807 /* Reclaim queued SKBs for transmission completion, lockless version */ 808 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 809 struct bcm_sysport_tx_ring *ring) 810 { 811 struct net_device *ndev = priv->netdev; 812 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; 813 unsigned int pkts_compl = 0, bytes_compl = 0; 814 struct bcm_sysport_cb *cb; 815 u32 hw_ind; 816 817 /* Clear status before servicing to reduce spurious interrupts */ 818 if (!ring->priv->is_lite) 819 intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR); 820 else 821 intrl2_0_writel(ring->priv, BIT(ring->index + 822 INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR); 823 824 /* Compute how many descriptors have been processed since last call */ 825 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 826 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 827 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); 828 829 last_c_index = ring->c_index; 830 num_tx_cbs = ring->size; 831 832 c_index &= (num_tx_cbs - 1); 833 834 if (c_index >= last_c_index) 835 last_tx_cn = c_index - last_c_index; 836 else 837 last_tx_cn = num_tx_cbs - last_c_index + c_index; 838 839 netif_dbg(priv, tx_done, ndev, 840 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", 841 ring->index, c_index, last_tx_cn, last_c_index); 842 843 while (last_tx_cn-- > 0) { 844 cb = ring->cbs + last_c_index; 845 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); 846 847 ring->desc_count++; 848 last_c_index++; 849 last_c_index &= (num_tx_cbs - 1); 850 } 851 852 ring->c_index = c_index; 853 854 netif_dbg(priv, tx_done, ndev, 855 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", 856 ring->index, ring->c_index, pkts_compl, bytes_compl); 857 858 return pkts_compl; 859 } 860 861 /* Locked version of the per-ring TX reclaim routine */ 862 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 863 struct bcm_sysport_tx_ring *ring) 864 { 865 struct netdev_queue *txq; 866 unsigned int released; 867 unsigned long flags; 868 869 txq = netdev_get_tx_queue(priv->netdev, ring->index); 870 871 spin_lock_irqsave(&ring->lock, flags); 872 released 
= __bcm_sysport_tx_reclaim(priv, ring); 873 if (released) 874 netif_tx_wake_queue(txq); 875 876 spin_unlock_irqrestore(&ring->lock, flags); 877 878 return released; 879 } 880 881 /* Locked version of the per-ring TX reclaim, but does not wake the queue */ 882 static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv, 883 struct bcm_sysport_tx_ring *ring) 884 { 885 unsigned long flags; 886 887 spin_lock_irqsave(&ring->lock, flags); 888 __bcm_sysport_tx_reclaim(priv, ring); 889 spin_unlock_irqrestore(&ring->lock, flags); 890 } 891 892 static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) 893 { 894 struct bcm_sysport_tx_ring *ring = 895 container_of(napi, struct bcm_sysport_tx_ring, napi); 896 unsigned int work_done = 0; 897 898 work_done = bcm_sysport_tx_reclaim(ring->priv, ring); 899 900 if (work_done == 0) { 901 napi_complete(napi); 902 /* re-enable TX interrupt */ 903 if (!ring->priv->is_lite) 904 intrl2_1_mask_clear(ring->priv, BIT(ring->index)); 905 else 906 intrl2_0_mask_clear(ring->priv, BIT(ring->index + 907 INTRL2_0_TDMA_MBDONE_SHIFT)); 908 909 return 0; 910 } 911 912 return budget; 913 } 914 915 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) 916 { 917 unsigned int q; 918 919 for (q = 0; q < priv->netdev->num_tx_queues; q++) 920 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]); 921 } 922 923 static int bcm_sysport_poll(struct napi_struct *napi, int budget) 924 { 925 struct bcm_sysport_priv *priv = 926 container_of(napi, struct bcm_sysport_priv, napi); 927 unsigned int work_done = 0; 928 929 work_done = bcm_sysport_desc_rx(priv, budget); 930 931 priv->rx_c_index += work_done; 932 priv->rx_c_index &= RDMA_CONS_INDEX_MASK; 933 934 /* SYSTEMPORT Lite groups the producer/consumer index, producer is 935 * maintained by HW, but writes to it will be ignore while RDMA 936 * is active 937 */ 938 if (!priv->is_lite) 939 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); 940 else 941 rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX); 942 943 if (work_done < budget) { 944 napi_complete_done(napi, work_done); 945 /* re-enable RX interrupts */ 946 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE); 947 } 948 949 return work_done; 950 } 951 952 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) 953 { 954 u32 reg; 955 956 /* Stop monitoring MPD interrupt */ 957 intrl2_0_mask_set(priv, INTRL2_0_MPD); 958 959 /* Clear the MagicPacket detection logic */ 960 reg = umac_readl(priv, UMAC_MPD_CTRL); 961 reg &= ~MPD_EN; 962 umac_writel(priv, reg, UMAC_MPD_CTRL); 963 964 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); 965 } 966 967 /* RX and misc interrupt routine */ 968 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) 969 { 970 struct net_device *dev = dev_id; 971 struct bcm_sysport_priv *priv = netdev_priv(dev); 972 struct bcm_sysport_tx_ring *txr; 973 unsigned int ring, ring_bit; 974 975 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & 976 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 977 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); 978 979 if (unlikely(priv->irq0_stat == 0)) { 980 netdev_warn(priv->netdev, "spurious RX interrupt\n"); 981 return IRQ_NONE; 982 } 983 984 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) { 985 if (likely(napi_schedule_prep(&priv->napi))) { 986 /* disable RX interrupts */ 987 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); 988 __napi_schedule_irqoff(&priv->napi); 989 } 990 } 991 992 /* TX ring is full, perform a full reclaim since we do not know 993 * which one 
would trigger this interrupt 994 */ 995 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) 996 bcm_sysport_tx_reclaim_all(priv); 997 998 if (priv->irq0_stat & INTRL2_0_MPD) { 999 netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n"); 1000 bcm_sysport_resume_from_wol(priv); 1001 } 1002 1003 if (!priv->is_lite) 1004 goto out; 1005 1006 for (ring = 0; ring < dev->num_tx_queues; ring++) { 1007 ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT); 1008 if (!(priv->irq0_stat & ring_bit)) 1009 continue; 1010 1011 txr = &priv->tx_rings[ring]; 1012 1013 if (likely(napi_schedule_prep(&txr->napi))) { 1014 intrl2_0_mask_set(priv, ring_bit); 1015 __napi_schedule(&txr->napi); 1016 } 1017 } 1018 out: 1019 return IRQ_HANDLED; 1020 } 1021 1022 /* TX interrupt service routine */ 1023 static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id) 1024 { 1025 struct net_device *dev = dev_id; 1026 struct bcm_sysport_priv *priv = netdev_priv(dev); 1027 struct bcm_sysport_tx_ring *txr; 1028 unsigned int ring; 1029 1030 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & 1031 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); 1032 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1033 1034 if (unlikely(priv->irq1_stat == 0)) { 1035 netdev_warn(priv->netdev, "spurious TX interrupt\n"); 1036 return IRQ_NONE; 1037 } 1038 1039 for (ring = 0; ring < dev->num_tx_queues; ring++) { 1040 if (!(priv->irq1_stat & BIT(ring))) 1041 continue; 1042 1043 txr = &priv->tx_rings[ring]; 1044 1045 if (likely(napi_schedule_prep(&txr->napi))) { 1046 intrl2_1_mask_set(priv, BIT(ring)); 1047 __napi_schedule_irqoff(&txr->napi); 1048 } 1049 } 1050 1051 return IRQ_HANDLED; 1052 } 1053 1054 static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) 1055 { 1056 struct bcm_sysport_priv *priv = dev_id; 1057 1058 pm_wakeup_event(&priv->pdev->dev, 0); 1059 1060 return IRQ_HANDLED; 1061 } 1062 1063 #ifdef CONFIG_NET_POLL_CONTROLLER 1064 static void bcm_sysport_poll_controller(struct net_device *dev) 1065 { 1066 struct bcm_sysport_priv *priv = netdev_priv(dev); 1067 1068 disable_irq(priv->irq0); 1069 bcm_sysport_rx_isr(priv->irq0, priv); 1070 enable_irq(priv->irq0); 1071 1072 if (!priv->is_lite) { 1073 disable_irq(priv->irq1); 1074 bcm_sysport_tx_isr(priv->irq1, priv); 1075 enable_irq(priv->irq1); 1076 } 1077 } 1078 #endif 1079 1080 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, 1081 struct net_device *dev) 1082 { 1083 struct sk_buff *nskb; 1084 struct bcm_tsb *tsb; 1085 u32 csum_info; 1086 u8 ip_proto; 1087 u16 csum_start; 1088 u16 ip_ver; 1089 1090 /* Re-allocate SKB if needed */ 1091 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { 1092 nskb = skb_realloc_headroom(skb, sizeof(*tsb)); 1093 dev_kfree_skb(skb); 1094 if (!nskb) { 1095 dev->stats.tx_errors++; 1096 dev->stats.tx_dropped++; 1097 return NULL; 1098 } 1099 skb = nskb; 1100 } 1101 1102 tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb)); 1103 /* Zero-out TSB by default */ 1104 memset(tsb, 0, sizeof(*tsb)); 1105 1106 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1107 ip_ver = htons(skb->protocol); 1108 switch (ip_ver) { 1109 case ETH_P_IP: 1110 ip_proto = ip_hdr(skb)->protocol; 1111 break; 1112 case ETH_P_IPV6: 1113 ip_proto = ipv6_hdr(skb)->nexthdr; 1114 break; 1115 default: 1116 return skb; 1117 } 1118 1119 /* Get the checksum offset and the L4 (transport) offset */ 1120 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb); 1121 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK; 1122 csum_info |= (csum_start << L4_PTR_SHIFT); 1123 1124 if (ip_proto == 
IPPROTO_TCP || ip_proto == IPPROTO_UDP) { 1125 csum_info |= L4_LENGTH_VALID; 1126 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) 1127 csum_info |= L4_UDP; 1128 } else { 1129 csum_info = 0; 1130 } 1131 1132 tsb->l4_ptr_dest_map = csum_info; 1133 } 1134 1135 return skb; 1136 } 1137 1138 static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, 1139 struct net_device *dev) 1140 { 1141 struct bcm_sysport_priv *priv = netdev_priv(dev); 1142 struct device *kdev = &priv->pdev->dev; 1143 struct bcm_sysport_tx_ring *ring; 1144 struct bcm_sysport_cb *cb; 1145 struct netdev_queue *txq; 1146 struct dma_desc *desc; 1147 unsigned int skb_len; 1148 unsigned long flags; 1149 dma_addr_t mapping; 1150 u32 len_status; 1151 u16 queue; 1152 int ret; 1153 1154 queue = skb_get_queue_mapping(skb); 1155 txq = netdev_get_tx_queue(dev, queue); 1156 ring = &priv->tx_rings[queue]; 1157 1158 /* lock against tx reclaim in BH context and TX ring full interrupt */ 1159 spin_lock_irqsave(&ring->lock, flags); 1160 if (unlikely(ring->desc_count == 0)) { 1161 netif_tx_stop_queue(txq); 1162 netdev_err(dev, "queue %d awake and ring full!\n", queue); 1163 ret = NETDEV_TX_BUSY; 1164 goto out; 1165 } 1166 1167 /* The Ethernet switch we are interfaced with needs packets to be at 1168 * least 64 bytes (including FCS) otherwise they will be discarded when 1169 * they enter the switch port logic. When Broadcom tags are enabled, we 1170 * need to make sure that packets are at least 68 bytes 1171 * (including FCS and tag) because the length verification is done after 1172 * the Broadcom tag is stripped off the ingress packet. 1173 */ 1174 if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { 1175 ret = NETDEV_TX_OK; 1176 goto out; 1177 } 1178 1179 /* Insert TSB and checksum infos */ 1180 if (priv->tsb_en) { 1181 skb = bcm_sysport_insert_tsb(skb, dev); 1182 if (!skb) { 1183 ret = NETDEV_TX_OK; 1184 goto out; 1185 } 1186 } 1187 1188 skb_len = skb->len; 1189 1190 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 1191 if (dma_mapping_error(kdev, mapping)) { 1192 priv->mib.tx_dma_failed++; 1193 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", 1194 skb->data, skb_len); 1195 ret = NETDEV_TX_OK; 1196 goto out; 1197 } 1198 1199 /* Remember the SKB for future freeing */ 1200 cb = &ring->cbs[ring->curr_desc]; 1201 cb->skb = skb; 1202 dma_unmap_addr_set(cb, dma_addr, mapping); 1203 dma_unmap_len_set(cb, dma_len, skb_len); 1204 1205 /* Fetch a descriptor entry from our pool */ 1206 desc = ring->desc_cpu; 1207 1208 desc->addr_lo = lower_32_bits(mapping); 1209 len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; 1210 len_status |= (skb_len << DESC_LEN_SHIFT); 1211 len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << 1212 DESC_STATUS_SHIFT; 1213 if (skb->ip_summed == CHECKSUM_PARTIAL) 1214 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); 1215 1216 ring->curr_desc++; 1217 if (ring->curr_desc == ring->size) 1218 ring->curr_desc = 0; 1219 ring->desc_count--; 1220 1221 /* Ensure write completion of the descriptor status/length 1222 * in DRAM before the System Port WRITE_PORT register latches 1223 * the value 1224 */ 1225 wmb(); 1226 desc->addr_status_len = len_status; 1227 wmb(); 1228 1229 /* Write this descriptor address to the RING write port */ 1230 tdma_port_write_desc_addr(priv, desc, ring->index); 1231 1232 /* Check ring space and update SW control flow */ 1233 if (ring->desc_count == 0) 1234 netif_tx_stop_queue(txq); 1235 1236 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n", 
1237 ring->index, ring->desc_count, ring->curr_desc); 1238 1239 ret = NETDEV_TX_OK; 1240 out: 1241 spin_unlock_irqrestore(&ring->lock, flags); 1242 return ret; 1243 } 1244 1245 static void bcm_sysport_tx_timeout(struct net_device *dev) 1246 { 1247 netdev_warn(dev, "transmit timeout!\n"); 1248 1249 netif_trans_update(dev); 1250 dev->stats.tx_errors++; 1251 1252 netif_tx_wake_all_queues(dev); 1253 } 1254 1255 /* phylib adjust link callback */ 1256 static void bcm_sysport_adj_link(struct net_device *dev) 1257 { 1258 struct bcm_sysport_priv *priv = netdev_priv(dev); 1259 struct phy_device *phydev = dev->phydev; 1260 unsigned int changed = 0; 1261 u32 cmd_bits = 0, reg; 1262 1263 if (priv->old_link != phydev->link) { 1264 changed = 1; 1265 priv->old_link = phydev->link; 1266 } 1267 1268 if (priv->old_duplex != phydev->duplex) { 1269 changed = 1; 1270 priv->old_duplex = phydev->duplex; 1271 } 1272 1273 if (priv->is_lite) 1274 goto out; 1275 1276 switch (phydev->speed) { 1277 case SPEED_2500: 1278 cmd_bits = CMD_SPEED_2500; 1279 break; 1280 case SPEED_1000: 1281 cmd_bits = CMD_SPEED_1000; 1282 break; 1283 case SPEED_100: 1284 cmd_bits = CMD_SPEED_100; 1285 break; 1286 case SPEED_10: 1287 cmd_bits = CMD_SPEED_10; 1288 break; 1289 default: 1290 break; 1291 } 1292 cmd_bits <<= CMD_SPEED_SHIFT; 1293 1294 if (phydev->duplex == DUPLEX_HALF) 1295 cmd_bits |= CMD_HD_EN; 1296 1297 if (priv->old_pause != phydev->pause) { 1298 changed = 1; 1299 priv->old_pause = phydev->pause; 1300 } 1301 1302 if (!phydev->pause) 1303 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; 1304 1305 if (!changed) 1306 return; 1307 1308 if (phydev->link) { 1309 reg = umac_readl(priv, UMAC_CMD); 1310 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | 1311 CMD_HD_EN | CMD_RX_PAUSE_IGNORE | 1312 CMD_TX_PAUSE_IGNORE); 1313 reg |= cmd_bits; 1314 umac_writel(priv, reg, UMAC_CMD); 1315 } 1316 out: 1317 if (changed) 1318 phy_print_status(phydev); 1319 } 1320 1321 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, 1322 unsigned int index) 1323 { 1324 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; 1325 struct device *kdev = &priv->pdev->dev; 1326 size_t size; 1327 void *p; 1328 u32 reg; 1329 1330 /* Simple descriptors partitioning for now */ 1331 size = 256; 1332 1333 /* We just need one DMA descriptor which is DMA-able, since writing to 1334 * the port will allocate a new descriptor in its internal linked-list 1335 */ 1336 p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1337 GFP_KERNEL); 1338 if (!p) { 1339 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); 1340 return -ENOMEM; 1341 } 1342 1343 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); 1344 if (!ring->cbs) { 1345 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); 1346 return -ENOMEM; 1347 } 1348 1349 /* Initialize SW view of the ring */ 1350 spin_lock_init(&ring->lock); 1351 ring->priv = priv; 1352 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); 1353 ring->index = index; 1354 ring->size = size; 1355 ring->alloc_size = ring->size; 1356 ring->desc_cpu = p; 1357 ring->desc_count = ring->size; 1358 ring->curr_desc = 0; 1359 1360 /* Initialize HW ring */ 1361 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index)); 1362 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index)); 1363 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index)); 1364 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index)); 1365 tdma_writel(priv, RING_IGNORE_STATUS, 
TDMA_DESC_RING_MAPPING(index)); 1366 tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); 1367 1368 /* Program the number of descriptors as MAX_THRESHOLD and half of 1369 * its size for the hysteresis trigger 1370 */ 1371 tdma_writel(priv, ring->size | 1372 1 << RING_HYST_THRESH_SHIFT, 1373 TDMA_DESC_RING_MAX_HYST(index)); 1374 1375 /* Enable the ring queue in the arbiter */ 1376 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN); 1377 reg |= (1 << index); 1378 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN); 1379 1380 napi_enable(&ring->napi); 1381 1382 netif_dbg(priv, hw, priv->netdev, 1383 "TDMA cfg, size=%d, desc_cpu=%p\n", 1384 ring->size, ring->desc_cpu); 1385 1386 return 0; 1387 } 1388 1389 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, 1390 unsigned int index) 1391 { 1392 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; 1393 struct device *kdev = &priv->pdev->dev; 1394 u32 reg; 1395 1396 /* Caller should stop the TDMA engine */ 1397 reg = tdma_readl(priv, TDMA_STATUS); 1398 if (!(reg & TDMA_DISABLED)) 1399 netdev_warn(priv->netdev, "TDMA not stopped!\n"); 1400 1401 /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could 1402 * fail, so by checking this pointer we know whether the TX ring was 1403 * fully initialized or not. 1404 */ 1405 if (!ring->cbs) 1406 return; 1407 1408 napi_disable(&ring->napi); 1409 netif_napi_del(&ring->napi); 1410 1411 bcm_sysport_tx_clean(priv, ring); 1412 1413 kfree(ring->cbs); 1414 ring->cbs = NULL; 1415 1416 if (ring->desc_dma) { 1417 dma_free_coherent(kdev, sizeof(struct dma_desc), 1418 ring->desc_cpu, ring->desc_dma); 1419 ring->desc_dma = 0; 1420 } 1421 ring->size = 0; 1422 ring->alloc_size = 0; 1423 1424 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n"); 1425 } 1426 1427 /* RDMA helper */ 1428 static inline int rdma_enable_set(struct bcm_sysport_priv *priv, 1429 unsigned int enable) 1430 { 1431 unsigned int timeout = 1000; 1432 u32 reg; 1433 1434 reg = rdma_readl(priv, RDMA_CONTROL); 1435 if (enable) 1436 reg |= RDMA_EN; 1437 else 1438 reg &= ~RDMA_EN; 1439 rdma_writel(priv, reg, RDMA_CONTROL); 1440 1441 /* Poll for RMDA disabling completion */ 1442 do { 1443 reg = rdma_readl(priv, RDMA_STATUS); 1444 if (!!(reg & RDMA_DISABLED) == !enable) 1445 return 0; 1446 usleep_range(1000, 2000); 1447 } while (timeout-- > 0); 1448 1449 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n"); 1450 1451 return -ETIMEDOUT; 1452 } 1453 1454 /* TDMA helper */ 1455 static inline int tdma_enable_set(struct bcm_sysport_priv *priv, 1456 unsigned int enable) 1457 { 1458 unsigned int timeout = 1000; 1459 u32 reg; 1460 1461 reg = tdma_readl(priv, TDMA_CONTROL); 1462 if (enable) 1463 reg |= tdma_control_bit(priv, TDMA_EN); 1464 else 1465 reg &= ~tdma_control_bit(priv, TDMA_EN); 1466 tdma_writel(priv, reg, TDMA_CONTROL); 1467 1468 /* Poll for TMDA disabling completion */ 1469 do { 1470 reg = tdma_readl(priv, TDMA_STATUS); 1471 if (!!(reg & TDMA_DISABLED) == !enable) 1472 return 0; 1473 1474 usleep_range(1000, 2000); 1475 } while (timeout-- > 0); 1476 1477 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n"); 1478 1479 return -ETIMEDOUT; 1480 } 1481 1482 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) 1483 { 1484 struct bcm_sysport_cb *cb; 1485 u32 reg; 1486 int ret; 1487 int i; 1488 1489 /* Initialize SW view of the RX ring */ 1490 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC; 1491 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; 1492 priv->rx_c_index = 0; 1493 
priv->rx_read_ptr = 0; 1494 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), 1495 GFP_KERNEL); 1496 if (!priv->rx_cbs) { 1497 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); 1498 return -ENOMEM; 1499 } 1500 1501 for (i = 0; i < priv->num_rx_bds; i++) { 1502 cb = priv->rx_cbs + i; 1503 cb->bd_addr = priv->rx_bds + i * DESC_SIZE; 1504 } 1505 1506 ret = bcm_sysport_alloc_rx_bufs(priv); 1507 if (ret) { 1508 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); 1509 return ret; 1510 } 1511 1512 /* Initialize HW, ensure RDMA is disabled */ 1513 reg = rdma_readl(priv, RDMA_STATUS); 1514 if (!(reg & RDMA_DISABLED)) 1515 rdma_enable_set(priv, 0); 1516 1517 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO); 1518 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI); 1519 rdma_writel(priv, 0, RDMA_PROD_INDEX); 1520 rdma_writel(priv, 0, RDMA_CONS_INDEX); 1521 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT | 1522 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE); 1523 /* Operate the queue in ring mode */ 1524 rdma_writel(priv, 0, RDMA_START_ADDR_HI); 1525 rdma_writel(priv, 0, RDMA_START_ADDR_LO); 1526 rdma_writel(priv, 0, RDMA_END_ADDR_HI); 1527 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); 1528 1529 rdma_writel(priv, 1, RDMA_MBDONE_INTR); 1530 1531 netif_dbg(priv, hw, priv->netdev, 1532 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", 1533 priv->num_rx_bds, priv->rx_bds); 1534 1535 return 0; 1536 } 1537 1538 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv) 1539 { 1540 struct bcm_sysport_cb *cb; 1541 unsigned int i; 1542 u32 reg; 1543 1544 /* Caller should ensure RDMA is disabled */ 1545 reg = rdma_readl(priv, RDMA_STATUS); 1546 if (!(reg & RDMA_DISABLED)) 1547 netdev_warn(priv->netdev, "RDMA not stopped!\n"); 1548 1549 for (i = 0; i < priv->num_rx_bds; i++) { 1550 cb = &priv->rx_cbs[i]; 1551 if (dma_unmap_addr(cb, dma_addr)) 1552 dma_unmap_single(&priv->pdev->dev, 1553 dma_unmap_addr(cb, dma_addr), 1554 RX_BUF_LENGTH, DMA_FROM_DEVICE); 1555 bcm_sysport_free_cb(cb); 1556 } 1557 1558 kfree(priv->rx_cbs); 1559 priv->rx_cbs = NULL; 1560 1561 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n"); 1562 } 1563 1564 static void bcm_sysport_set_rx_mode(struct net_device *dev) 1565 { 1566 struct bcm_sysport_priv *priv = netdev_priv(dev); 1567 u32 reg; 1568 1569 if (priv->is_lite) 1570 return; 1571 1572 reg = umac_readl(priv, UMAC_CMD); 1573 if (dev->flags & IFF_PROMISC) 1574 reg |= CMD_PROMISC; 1575 else 1576 reg &= ~CMD_PROMISC; 1577 umac_writel(priv, reg, UMAC_CMD); 1578 1579 /* No support for ALLMULTI */ 1580 if (dev->flags & IFF_ALLMULTI) 1581 return; 1582 } 1583 1584 static inline void umac_enable_set(struct bcm_sysport_priv *priv, 1585 u32 mask, unsigned int enable) 1586 { 1587 u32 reg; 1588 1589 if (!priv->is_lite) { 1590 reg = umac_readl(priv, UMAC_CMD); 1591 if (enable) 1592 reg |= mask; 1593 else 1594 reg &= ~mask; 1595 umac_writel(priv, reg, UMAC_CMD); 1596 } else { 1597 reg = gib_readl(priv, GIB_CONTROL); 1598 if (enable) 1599 reg |= mask; 1600 else 1601 reg &= ~mask; 1602 gib_writel(priv, reg, GIB_CONTROL); 1603 } 1604 1605 /* UniMAC stops on a packet boundary, wait for a full-sized packet 1606 * to be processed (1 msec). 
1607 */ 1608 if (enable == 0) 1609 usleep_range(1000, 2000); 1610 } 1611 1612 static inline void umac_reset(struct bcm_sysport_priv *priv) 1613 { 1614 u32 reg; 1615 1616 if (priv->is_lite) 1617 return; 1618 1619 reg = umac_readl(priv, UMAC_CMD); 1620 reg |= CMD_SW_RESET; 1621 umac_writel(priv, reg, UMAC_CMD); 1622 udelay(10); 1623 reg = umac_readl(priv, UMAC_CMD); 1624 reg &= ~CMD_SW_RESET; 1625 umac_writel(priv, reg, UMAC_CMD); 1626 } 1627 1628 static void umac_set_hw_addr(struct bcm_sysport_priv *priv, 1629 unsigned char *addr) 1630 { 1631 u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | 1632 addr[3]; 1633 u32 mac1 = (addr[4] << 8) | addr[5]; 1634 1635 if (!priv->is_lite) { 1636 umac_writel(priv, mac0, UMAC_MAC0); 1637 umac_writel(priv, mac1, UMAC_MAC1); 1638 } else { 1639 gib_writel(priv, mac0, GIB_MAC0); 1640 gib_writel(priv, mac1, GIB_MAC1); 1641 } 1642 } 1643 1644 static void topctrl_flush(struct bcm_sysport_priv *priv) 1645 { 1646 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); 1647 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); 1648 mdelay(1); 1649 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 1650 topctrl_writel(priv, 0, TX_FLUSH_CNTL); 1651 } 1652 1653 static int bcm_sysport_change_mac(struct net_device *dev, void *p) 1654 { 1655 struct bcm_sysport_priv *priv = netdev_priv(dev); 1656 struct sockaddr *addr = p; 1657 1658 if (!is_valid_ether_addr(addr->sa_data)) 1659 return -EINVAL; 1660 1661 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1662 1663 /* interface is disabled, changes to MAC will be reflected on next 1664 * open call 1665 */ 1666 if (!netif_running(dev)) 1667 return 0; 1668 1669 umac_set_hw_addr(priv, dev->dev_addr); 1670 1671 return 0; 1672 } 1673 1674 static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev) 1675 { 1676 struct bcm_sysport_priv *priv = netdev_priv(dev); 1677 unsigned long tx_bytes = 0, tx_packets = 0; 1678 struct bcm_sysport_tx_ring *ring; 1679 unsigned int q; 1680 1681 for (q = 0; q < dev->num_tx_queues; q++) { 1682 ring = &priv->tx_rings[q]; 1683 tx_bytes += ring->bytes; 1684 tx_packets += ring->packets; 1685 } 1686 1687 dev->stats.tx_bytes = tx_bytes; 1688 dev->stats.tx_packets = tx_packets; 1689 return &dev->stats; 1690 } 1691 1692 static void bcm_sysport_netif_start(struct net_device *dev) 1693 { 1694 struct bcm_sysport_priv *priv = netdev_priv(dev); 1695 1696 /* Enable NAPI */ 1697 napi_enable(&priv->napi); 1698 1699 /* Enable RX interrupt and TX ring full interrupt */ 1700 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); 1701 1702 phy_start(dev->phydev); 1703 1704 /* Enable TX interrupts for the TXQs */ 1705 if (!priv->is_lite) 1706 intrl2_1_mask_clear(priv, 0xffffffff); 1707 else 1708 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); 1709 1710 /* Last call before we start the real business */ 1711 netif_tx_start_all_queues(dev); 1712 } 1713 1714 static void rbuf_init(struct bcm_sysport_priv *priv) 1715 { 1716 u32 reg; 1717 1718 reg = rbuf_readl(priv, RBUF_CONTROL); 1719 reg |= RBUF_4B_ALGN | RBUF_RSB_EN; 1720 /* Set a correct RSB format on SYSTEMPORT Lite */ 1721 if (priv->is_lite) { 1722 reg &= ~RBUF_RSB_SWAP1; 1723 reg |= RBUF_RSB_SWAP0; 1724 } 1725 rbuf_writel(priv, reg, RBUF_CONTROL); 1726 } 1727 1728 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv) 1729 { 1730 intrl2_0_mask_set(priv, 0xffffffff); 1731 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1732 if (!priv->is_lite) { 1733 intrl2_1_mask_set(priv, 0xffffffff); 1734 intrl2_1_writel(priv, 
0xffffffff, INTRL2_CPU_CLEAR); 1735 } 1736 } 1737 1738 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) 1739 { 1740 u32 __maybe_unused reg; 1741 1742 /* Include Broadcom tag in pad extension */ 1743 if (netdev_uses_dsa(priv->netdev)) { 1744 reg = gib_readl(priv, GIB_CONTROL); 1745 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); 1746 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; 1747 gib_writel(priv, reg, GIB_CONTROL); 1748 } 1749 } 1750 1751 static int bcm_sysport_open(struct net_device *dev) 1752 { 1753 struct bcm_sysport_priv *priv = netdev_priv(dev); 1754 struct phy_device *phydev; 1755 unsigned int i; 1756 int ret; 1757 1758 /* Reset UniMAC */ 1759 umac_reset(priv); 1760 1761 /* Flush TX and RX FIFOs at TOPCTRL level */ 1762 topctrl_flush(priv); 1763 1764 /* Disable the UniMAC RX/TX */ 1765 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); 1766 1767 /* Enable RBUF 2bytes alignment and Receive Status Block */ 1768 rbuf_init(priv); 1769 1770 /* Set maximum frame length */ 1771 if (!priv->is_lite) 1772 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 1773 else 1774 gib_set_pad_extension(priv); 1775 1776 /* Set MAC address */ 1777 umac_set_hw_addr(priv, dev->dev_addr); 1778 1779 /* Read CRC forward */ 1780 if (!priv->is_lite) 1781 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); 1782 else 1783 priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & 1784 GIB_FCS_STRIP); 1785 1786 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 1787 0, priv->phy_interface); 1788 if (!phydev) { 1789 netdev_err(dev, "could not attach to PHY\n"); 1790 return -ENODEV; 1791 } 1792 1793 /* Reset house keeping link status */ 1794 priv->old_duplex = -1; 1795 priv->old_link = -1; 1796 priv->old_pause = -1; 1797 1798 /* mask all interrupts and request them */ 1799 bcm_sysport_mask_all_intrs(priv); 1800 1801 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); 1802 if (ret) { 1803 netdev_err(dev, "failed to request RX interrupt\n"); 1804 goto out_phy_disconnect; 1805 } 1806 1807 if (!priv->is_lite) { 1808 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, 1809 dev->name, dev); 1810 if (ret) { 1811 netdev_err(dev, "failed to request TX interrupt\n"); 1812 goto out_free_irq0; 1813 } 1814 } 1815 1816 /* Initialize both hardware and software ring */ 1817 for (i = 0; i < dev->num_tx_queues; i++) { 1818 ret = bcm_sysport_init_tx_ring(priv, i); 1819 if (ret) { 1820 netdev_err(dev, "failed to initialize TX ring %d\n", 1821 i); 1822 goto out_free_tx_ring; 1823 } 1824 } 1825 1826 /* Initialize linked-list */ 1827 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); 1828 1829 /* Initialize RX ring */ 1830 ret = bcm_sysport_init_rx_ring(priv); 1831 if (ret) { 1832 netdev_err(dev, "failed to initialize RX ring\n"); 1833 goto out_free_rx_ring; 1834 } 1835 1836 /* Turn on RDMA */ 1837 ret = rdma_enable_set(priv, 1); 1838 if (ret) 1839 goto out_free_rx_ring; 1840 1841 /* Turn on TDMA */ 1842 ret = tdma_enable_set(priv, 1); 1843 if (ret) 1844 goto out_clear_rx_int; 1845 1846 /* Turn on UniMAC TX/RX */ 1847 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1); 1848 1849 bcm_sysport_netif_start(dev); 1850 1851 return 0; 1852 1853 out_clear_rx_int: 1854 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); 1855 out_free_rx_ring: 1856 bcm_sysport_fini_rx_ring(priv); 1857 out_free_tx_ring: 1858 for (i = 0; i < dev->num_tx_queues; i++) 1859 bcm_sysport_fini_tx_ring(priv, i); 1860 if (!priv->is_lite) 1861 free_irq(priv->irq1, dev); 1862 
out_free_irq0: 1863 free_irq(priv->irq0, dev); 1864 out_phy_disconnect: 1865 phy_disconnect(phydev); 1866 return ret; 1867 } 1868 1869 static void bcm_sysport_netif_stop(struct net_device *dev) 1870 { 1871 struct bcm_sysport_priv *priv = netdev_priv(dev); 1872 1873 /* stop all software from updating hardware */ 1874 netif_tx_stop_all_queues(dev); 1875 napi_disable(&priv->napi); 1876 phy_stop(dev->phydev); 1877 1878 /* mask all interrupts */ 1879 bcm_sysport_mask_all_intrs(priv); 1880 } 1881 1882 static int bcm_sysport_stop(struct net_device *dev) 1883 { 1884 struct bcm_sysport_priv *priv = netdev_priv(dev); 1885 unsigned int i; 1886 int ret; 1887 1888 bcm_sysport_netif_stop(dev); 1889 1890 /* Disable UniMAC RX */ 1891 umac_enable_set(priv, CMD_RX_EN, 0); 1892 1893 ret = tdma_enable_set(priv, 0); 1894 if (ret) { 1895 netdev_err(dev, "timeout disabling RDMA\n"); 1896 return ret; 1897 } 1898 1899 /* Wait for a maximum packet size to be drained */ 1900 usleep_range(2000, 3000); 1901 1902 ret = rdma_enable_set(priv, 0); 1903 if (ret) { 1904 netdev_err(dev, "timeout disabling TDMA\n"); 1905 return ret; 1906 } 1907 1908 /* Disable UniMAC TX */ 1909 umac_enable_set(priv, CMD_TX_EN, 0); 1910 1911 /* Free RX/TX rings SW structures */ 1912 for (i = 0; i < dev->num_tx_queues; i++) 1913 bcm_sysport_fini_tx_ring(priv, i); 1914 bcm_sysport_fini_rx_ring(priv); 1915 1916 free_irq(priv->irq0, dev); 1917 if (!priv->is_lite) 1918 free_irq(priv->irq1, dev); 1919 1920 /* Disconnect from PHY */ 1921 phy_disconnect(dev->phydev); 1922 1923 return 0; 1924 } 1925 1926 static const struct ethtool_ops bcm_sysport_ethtool_ops = { 1927 .get_drvinfo = bcm_sysport_get_drvinfo, 1928 .get_msglevel = bcm_sysport_get_msglvl, 1929 .set_msglevel = bcm_sysport_set_msglvl, 1930 .get_link = ethtool_op_get_link, 1931 .get_strings = bcm_sysport_get_strings, 1932 .get_ethtool_stats = bcm_sysport_get_stats, 1933 .get_sset_count = bcm_sysport_get_sset_count, 1934 .get_wol = bcm_sysport_get_wol, 1935 .set_wol = bcm_sysport_set_wol, 1936 .get_coalesce = bcm_sysport_get_coalesce, 1937 .set_coalesce = bcm_sysport_set_coalesce, 1938 .get_link_ksettings = phy_ethtool_get_link_ksettings, 1939 .set_link_ksettings = phy_ethtool_set_link_ksettings, 1940 }; 1941 1942 static const struct net_device_ops bcm_sysport_netdev_ops = { 1943 .ndo_start_xmit = bcm_sysport_xmit, 1944 .ndo_tx_timeout = bcm_sysport_tx_timeout, 1945 .ndo_open = bcm_sysport_open, 1946 .ndo_stop = bcm_sysport_stop, 1947 .ndo_set_features = bcm_sysport_set_features, 1948 .ndo_set_rx_mode = bcm_sysport_set_rx_mode, 1949 .ndo_set_mac_address = bcm_sysport_change_mac, 1950 #ifdef CONFIG_NET_POLL_CONTROLLER 1951 .ndo_poll_controller = bcm_sysport_poll_controller, 1952 #endif 1953 .ndo_get_stats = bcm_sysport_get_nstats, 1954 }; 1955 1956 #define REV_FMT "v%2x.%02x" 1957 1958 static const struct bcm_sysport_hw_params bcm_sysport_params[] = { 1959 [SYSTEMPORT] = { 1960 .is_lite = false, 1961 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS, 1962 }, 1963 [SYSTEMPORT_LITE] = { 1964 .is_lite = true, 1965 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS, 1966 }, 1967 }; 1968 1969 static const struct of_device_id bcm_sysport_of_match[] = { 1970 { .compatible = "brcm,systemportlite-v1.00", 1971 .data = &bcm_sysport_params[SYSTEMPORT_LITE] }, 1972 { .compatible = "brcm,systemport-v1.00", 1973 .data = &bcm_sysport_params[SYSTEMPORT] }, 1974 { .compatible = "brcm,systemport", 1975 .data = &bcm_sysport_params[SYSTEMPORT] }, 1976 { /* sentinel */ } 1977 }; 1978 MODULE_DEVICE_TABLE(of, 
		   bcm_sysport_of_match);

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings)
		return -ENOMEM;

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite)
		priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_fixed_link;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ?
		 " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

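/* Undo bcm_sysport_suspend(): rebuild the TX/RX rings, re-enable RDMA/TDMA
 * and the UniMAC, then resume the PHY and restart the interface.
 */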
static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software rings */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK if it was enabled before suspend */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe = bcm_sysport_probe,
	.remove = bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");