/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				  u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
	priv->irq##which##_mask &= ~(mask);				\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the 64-bit check explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
			d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
						struct dma_desc *desc,
						unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcm_sysport_get_settings(struct net_device *dev,
					struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcm_sysport_set_rx_csum(struct net_device *dev,
					netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_csum_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_csum_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
					netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
					netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), 200 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), 201 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), 202 STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), 203 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), 204 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), 205 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), 206 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), 207 STAT_MIB_RX("rx_pkts", mib.rx.pkt), 208 STAT_MIB_RX("rx_bytes", mib.rx.bytes), 209 STAT_MIB_RX("rx_multicast", mib.rx.mca), 210 STAT_MIB_RX("rx_broadcast", mib.rx.bca), 211 STAT_MIB_RX("rx_fcs", mib.rx.fcs), 212 STAT_MIB_RX("rx_control", mib.rx.cf), 213 STAT_MIB_RX("rx_pause", mib.rx.pf), 214 STAT_MIB_RX("rx_unknown", mib.rx.uo), 215 STAT_MIB_RX("rx_align", mib.rx.aln), 216 STAT_MIB_RX("rx_outrange", mib.rx.flr), 217 STAT_MIB_RX("rx_code", mib.rx.cde), 218 STAT_MIB_RX("rx_carrier", mib.rx.fcr), 219 STAT_MIB_RX("rx_oversize", mib.rx.ovr), 220 STAT_MIB_RX("rx_jabber", mib.rx.jbr), 221 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue), 222 STAT_MIB_RX("rx_good_pkts", mib.rx.pok), 223 STAT_MIB_RX("rx_unicast", mib.rx.uc), 224 STAT_MIB_RX("rx_ppp", mib.rx.ppp), 225 STAT_MIB_RX("rx_crc", mib.rx.rcrc), 226 /* UniMAC TSV counters */ 227 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), 228 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), 229 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), 230 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), 231 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), 232 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), 233 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 234 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 235 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 236 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 237 STAT_MIB_TX("tx_pkts", mib.tx.pkts), 238 STAT_MIB_TX("tx_multicast", mib.tx.mca), 239 STAT_MIB_TX("tx_broadcast", mib.tx.bca), 240 STAT_MIB_TX("tx_pause", mib.tx.pf), 241 STAT_MIB_TX("tx_control", mib.tx.cf), 242 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs), 243 STAT_MIB_TX("tx_oversize", mib.tx.ovr), 244 STAT_MIB_TX("tx_defer", mib.tx.drf), 245 STAT_MIB_TX("tx_excess_defer", mib.tx.edf), 246 STAT_MIB_TX("tx_single_col", mib.tx.scl), 247 STAT_MIB_TX("tx_multi_col", mib.tx.mcl), 248 STAT_MIB_TX("tx_late_col", mib.tx.lcl), 249 STAT_MIB_TX("tx_excess_col", mib.tx.ecl), 250 STAT_MIB_TX("tx_frags", mib.tx.frg), 251 STAT_MIB_TX("tx_total_col", mib.tx.ncl), 252 STAT_MIB_TX("tx_jabber", mib.tx.jbr), 253 STAT_MIB_TX("tx_bytes", mib.tx.bytes), 254 STAT_MIB_TX("tx_good_pkts", mib.tx.pok), 255 STAT_MIB_TX("tx_unicast", mib.tx.uc), 256 /* UniMAC RUNT counters */ 257 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt), 258 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), 259 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), 260 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 261 /* RXCHK misc statistics */ 262 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), 263 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, 264 RXCHK_OTHER_DISC_CNTR), 265 /* RBUF misc statistics */ 266 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 267 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 268 }; 269 270 #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 271 272 static void bcm_sysport_get_drvinfo(struct net_device *dev, 273 struct 
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
	info->n_stats = BCM_SYSPORT_STATS_LEN;
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
					u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				bcm_sysport_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	dma_addr_t mapping;
	int ret;

	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!cb->skb) {
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(kdev, cb->skb->data,
				RX_BUF_LENGTH, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcm_sysport_free_cb(cb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DESC_SIZE);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	return 0;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	int ret = 0;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcm_sysport_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, ndev,
			"p_index=%d rx_c_index=%d to_process=%d\n",
			p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) &&
		(processed < budget)) {

		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				RX_BUF_LENGTH, DMA_FROM_DEVICE);

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			DESC_STATUS_MASK;

		processed++;
		priv->rx_read_ptr++;
		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;

		netif_dbg(priv, rx_status, ndev,
			"p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			p_index, priv->rx_c_index, priv->rx_read_ptr,
			len, status);

		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto refill;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware prepends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
refill:
		bcm_sysport_rx_refill(priv, cb);
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
					struct bcm_sysport_cb *cb,
					unsigned int *bytes_compl,
					unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				dma_unmap_len(cb, dma_len),
				DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
				dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
			"ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
			ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
			"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
			ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));
	}

	return work_done;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return -ENOMEM;
		}
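
		/* skb_realloc_headroom() gave us a copy with enough headroom
		 * for the TSB; the original SKB was freed above, so continue
		 * with the copy from here on.
		 */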
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return 0;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else
			csum_info = 0;

		tsb->l4_ptr_dest_map = csum_info;
	}

	return 0;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		ret = bcm_sysport_insert_tsb(skb, dev);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
				skb->data, skb_len);
		ret = NETDEV_TX_OK;
		dev_kfree_skb_any(skb);
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
			DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
			ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (changed) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);

		phy_print_status(priv->phydev);
	}
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
			"TDMA cfg, size=%d, desc_cpu=%p\n",
			ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

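/* Both rdma_enable_set() above and tdma_enable_set() below poll the
 * hardware DISABLED status bit: when disabling they wait for it to be
 * set, and when enabling they wait for it to clear, which is what the
 * !!(reg & ..._DISABLED) == !enable test expresses.
 */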
/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	u32 reg;
	int ret;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kzalloc(priv->num_rx_bds *
				sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
			"RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
			priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					dma_unmap_addr(cb, dma_addr),
					RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
					unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= CMD_RX_EN | CMD_TX_EN;
	else
		reg &= ~(CMD_RX_EN | CMD_TX_EN);
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline int umac_reset(struct bcm_sysport_priv *priv)
{
	unsigned int timeout = 0;
	u32 reg;
	int ret = 0;

	umac_writel(priv, 0, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			break;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(&priv->pdev->dev,
			"timeout waiting for MAC to come out of reset\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
				unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	/* Reset UniMAC */
	ret = umac_reset(priv);
	if (ret) {
		netdev_err(dev, "UniMAC reset failed\n");
		return ret;
	}

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, 0);

	/* Enable RBUF 2 bytes alignment and Receive Status Block */
	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
					0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset housekeeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
					i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, 1);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(priv->phydev);
	return ret;
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	/* Disable UniMAC RX */
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_RX_EN;
	umac_writel(priv, reg, UMAC_CMD);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_TX_EN;
	umac_writel(priv, reg, UMAC_CMD);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(priv->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_settings		= bcm_sysport_get_settings,
	.set_settings		= bcm_sysport_set_settings,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
};

#define REV_FMT	"v%2x.%02x"

static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * with the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		random_ether_addr(dev->dev_addr);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* We are interfaced to a switch which handles the multicast
	 * filtering for us, so we do not support programming any
	 * multicast hash table in this Ethernet MAC.
	 */
	dev->flags &= ~IFF_MULTICAST;

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		"Broadcom SYSTEMPORT" REV_FMT
		" at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		(priv->rev >> 8) & 0xff, priv->rev & 0xff,
		priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;
err:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.owner = THIS_MODULE,
		.of_match_table = bcm_sysport_of_match,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");