/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessor register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				 u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved up by 4 bytes, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}
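/* A worked example of the two Lite quirks above (illustrative offsets
 * only, not taken from the register map): a register located 8 bytes
 * past RDMA_STATUS on full SYSTEMPORT sits 12 bytes past it on
 * SYSTEMPORT Lite, and every TDMA_CONTROL bit at or above ACB_ALGO is
 * shifted left by one position.
 */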
/* L2-interrupt masking/unmasking helpers; these keep a software copy of the
 * applied mask to avoid CPU_MASK_STATUS reads in hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bit explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write the upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksumming requires us to enable the Transmit
	 * status block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
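/* Note: BCM_SYSPORT_STATS_LEN only counts the static entries above. The
 * per-queue "txq%d_packets"/"txq%d_bytes" strings are generated at
 * runtime in bcm_sysport_get_strings(), and SYSTEMPORT Lite additionally
 * filters out the stat types its hardware does not implement (see
 * bcm_sysport_lite_stat_valid() below).
 */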
static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}
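/* bcm_sysport_update_mib_counters() below mirrors the hardware counters
 * into priv->mib: the running offset j advances by stat_sizeof for each
 * hardware-backed entry, which is why the table above must stay in
 * hardware order. The RXCHK and RBUF counters saturate at ~0 and are
 * written back to 0 once they do.
 */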
static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* On SYSTEMPORT Lite we have holes in our statistics, so j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}
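/* The 6-byte SecureOn password is split across two UniMAC registers:
 * UMAC_PSW_MS carries the two most significant bytes and UMAC_PSW_LS
 * the remaining four, hence the 16-bit/32-bit accessors above and the
 * mirror writes in bcm_sysport_set_wol() below.
 */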
static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
	       RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125 MHz, the DMA timeout is this reference
	 * clock divided by 1024, which yields roughly 8.192 us, and our
	 * maximum value has to fit in the RING_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
	    ec->use_adaptive_tx_coalesce)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}
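/* Worked example for the timeout conversion above: requesting
 * rx_coalesce_usecs = 100 programs DIV_ROUND_UP(100 * 1000, 8192) = 13
 * ticks of the 8.192 us reference clock, i.e. an effective timeout of
 * roughly 106.5 us; the same rounding applies to the per-ring TX
 * timeouts.
 */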
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
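/* Design note: bcm_sysport_rx_refill() swaps a freshly mapped skb into
 * the ring before returning the old one, so a ring entry never sits
 * without a buffer. If the allocation or mapping fails, NULL is returned
 * and the previous skb stays mapped on the ring; the caller drops the
 * incoming packet but keeps the ring entry usable.
 */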
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since the last call;
	 * SYSTEMPORT Lite groups the producer and consumer indexes into the
	 * same 32-bit register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the prepended Receive Status Block */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip all of
		 * this off the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward the CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}
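/* RX length accounting in the loop above: the length reported in the RSB
 * covers the status block itself plus the 2 alignment bytes, which is why
 * both are subtracted after skb_pull(), and the FCS is trimmed on top of
 * that when UniMAC forwards it. Only the resulting payload length is fed
 * into the byte counters.
 */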
static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}
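/* Two indexes cooperate in the reclaim above: c_index follows the
 * hardware consumer index (masked to RING_CONS_INDEX_MASK), while
 * clean_index walks the software cb array one entry at a time and wraps
 * at ring->size. Keeping them separate avoids indexing the cb array with
 * a hardware index that wraps on a different boundary than the ring size.
 */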
/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct net_dim_sample dim_sample;
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer and consumer indexes in the
	 * same register; the producer is maintained by HW, and writes to it
	 * are ignored while RDMA is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
			       priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}
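/* Note on the consumer index update in bcm_sysport_poll() above: on
 * SYSTEMPORT Lite the consumer half lives in the upper 16 bits of the
 * shared producer/consumer register, hence the << 16 shift; the producer
 * half is hardware-owned and the write to it is ignored while RDMA runs.
 */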
static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct bcm_sysport_net_dim *ndim =
		container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
		container_of(ndim, struct bcm_sysport_priv, dim);
	struct net_dim_cq_moder cur_profile =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = NET_DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;
	u32 reg;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
		      RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}
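/* SYSTEMPORT Lite has no second INTRL2 instance: its per-ring TDMA MBDONE
 * bits are folded into irq0, which is why bcm_sysport_rx_isr() above
 * walks the TX rings when is_lite is set, while full SYSTEMPORT takes the
 * dedicated TX handler below.
 */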
/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = nskb;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}
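/* In bcm_sysport_insert_tsb() above, csum_start is computed relative to
 * the frame as the hardware sees it once the TSB has been consumed:
 * skb_push() made the checksum start offset include the TSB, so
 * sizeof(*tsb) is subtracted back out before the offset and the
 * csum_offset-adjusted pointer are packed into l4_ptr_dest_map.
 */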
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum info */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		      DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
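/* Note that "desc" above is the single staging descriptor allocated in
 * bcm_sysport_init_tx_ring(): the hardware snapshots it through the
 * WRITE_PORT doorbell and queues it in its internal linked list, so no
 * descriptor array is maintained in DRAM and the paired barriers only
 * have to order the staging writes against the doorbell.
 */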
static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			 CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			 CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}
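/* When adaptive RX coalescing (DIM) is enabled, the statically configured
 * usecs/frames pair is ignored: net_dim picks a moderation profile from
 * the observed packet/byte rates and bcm_sysport_dim_work() applies it,
 * so the init above only programs the static values when use_dim is
 * false.
 */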
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
		    1 << RING_HYST_THRESH_SHIFT,
		    TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
		  ring->size, ring->desc_cpu, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part of bcm_sysport_init_tx_ring() which
	 * could fail, so by checking this pointer we know whether the TX ring
	 * was fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA enabling/disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA enabling/disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}
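/* Both helpers above poll the respective STATUS register until the
 * DISABLED bit matches the requested state, sleeping 1-2 ms per attempt
 * for up to 1000 attempts, so a stuck DMA engine surfaces as -ETIMEDOUT
 * after roughly one to two seconds instead of hanging the caller.
 */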
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
		    RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}
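/* Unlike TX, the RX descriptors live in on-chip RDMA memory: rx_bds
 * points straight at the mapped register space and the ring is operated
 * in ring mode over [0, num_rx_desc_words - 1], so no coherent DMA
 * allocation is needed for the RX ring itself, only for the packet
 * buffers attached above.
 */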
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}
static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 4-byte alignment and the Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}
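/* Illustrative sketch, not used by the driver: on SYSTEMPORT Lite the
 * CRC-forward setting read in bcm_sysport_open() above is the inverse of
 * the GIB FCS-strip bit, i.e. when hardware strips the FCS the CRC is not
 * forwarded to the host. The same logic, factored into a hypothetical
 * helper just to make the reading explicit:
 */
static inline bool example_gib_crc_fwd(u32 gib_control)
{
	/* FCS stripped by hardware => CRC is *not* forwarded */
	return !((gib_control & GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
}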
static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}

static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}

static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	if (index < 0)
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

	return 0;
}

static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	/* All filters are already in use, we cannot match more rules */
	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
	    RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	if (index >= RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	/* Location is the classification ID, and index is the position
	 * within one of our 8 possible filters to be programmed
	 */
	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

	set_bit(index, priv->filters);

	return 0;
}
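/* Illustrative sketch, not used by the driver: bcm_sysport_rule_set() above
 * programs the classification ID with the usual read-modify-write field
 * update: clear the field through its mask, then OR the new value in at the
 * field's shift. The same pattern in generic, hypothetical form:
 */
static inline u32 example_update_field(u32 reg, u32 mask, u32 shift, u32 val)
{
	reg &= ~(mask << shift);
	return reg | ((val & mask) << shift);
}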
static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
				u64 location)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, location);
	if (index < 0)
		return -EOPNOTSUPP;

	/* No need to disable this filter if it was enabled, this will
	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
	 */
	clear_bit(index, priv->filters);

	return 0;
}

static int bcm_sysport_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sysport_rule_get(priv, nfc);
		break;
	default:
		break;
	}

	return ret;
}

static int bcm_sysport_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sysport_rule_set(priv, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_rxnfc		= bcm_sysport_get_rxnfc,
	.set_rxnfc		= bcm_sysport_set_rxnfc,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev,
				    select_queue_fallback_t fallback)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return fallback(dev, skb, NULL);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return fallback(dev, skb, NULL);

	return tx_ring->index;
}
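/* Illustrative sketch, not used by the driver: the queue mapping decoded by
 * bcm_sysport_select_queue() above is set by the Broadcom DSA tagger as
 * (port << 8 | queue), see BRCM_TAG_SET_PORT_QUEUE(). A hypothetical worked
 * form of the ring lookup; e.g. mapping 0x0203 with 4 queues per port gives
 * queue 3, port 2, hence ring index 3 + 2 * 4 = 11.
 */
static inline unsigned int example_ring_index(u16 mapping,
					      unsigned int per_port_q)
{
	unsigned int q = BRCM_TAG_GET_QUEUE(mapping);
	unsigned int port = BRCM_TAG_GET_PORT(mapping);

	return q + port * per_port_q;
}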
static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, start, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't be setting up queue inspection for non-directly
	 * attached switches
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
	 * 1:1 mapping, only a 2:1 mapping. Halving the number of per-port
	 * (slave_dev) network device queues achieves just that. This needs
	 * to happen now, before any slave network device is used, so that
	 * it accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
	for (q = 0; q < num_tx_queues; q++) {
		ring = &priv->tx_rings[q + start];

		/* Just remember the mapping here; the actual programming is
		 * done during bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = q;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[q + port * num_tx_queues] = ring;

		/* Set all queues as being used now */
		set_bit(q + start, &priv->queue_bitmap);
	}

	return 0;
}

static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct dsa_notifier_register_info *info;

	if (event != DSA_PORT_REGISTER)
		return NOTIFY_DONE;

	info = ptr;

	return notifier_from_errno(bcm_sysport_map_queues(nb, info));
}
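/* Illustrative sketch, not used by the driver: bcm_sysport_map_queues()
 * above carves rings out of priv->queue_bitmap with find_first_zero_bit()
 * and set_bit(). The allocation pattern in isolation; note that the bounds
 * check below is an addition of this sketch, the driver relies on the
 * caller-side queue accounting instead.
 */
static inline int example_alloc_rings(unsigned long *bitmap,
				      unsigned int total,
				      unsigned int want)
{
	unsigned int start = find_first_zero_bit(bitmap, total);
	unsigned int q;

	if (start + want > total)	/* not enough free rings */
		return -ENOSPC;
	for (q = 0; q < want; q++)
		set_bit(start + q, bitmap);
	return start;
}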
#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
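/* Illustrative only: a hypothetical device tree node this table would
 * match. Property names follow those read in bcm_sysport_probe() below;
 * the unit address, reg size and interrupt specifiers are assumptions,
 * not taken from a real board file:
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>, <0x0 0x18 0x0>;
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *		phy-mode = "gmii";
 *	};
 */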
static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings) {
		ret = -ENOMEM;
		goto err_free_netdev;
	}

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * with the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}
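/* Illustrative sketch, not used by the driver: the revision word printed
 * by bcm_sysport_probe() above carries the major revision in bits 15:8 and
 * the minor in bits 7:0 after masking with REV_MASK, so a raw value of
 * 0x0102 prints as "v 1.02" with REV_FMT. A hypothetical decode helper:
 */
static inline void example_decode_rev(u32 rev, u8 *major, u8 *minor)
{
	*major = (rev >> 8) & 0xff;
	*minor = rev & 0xff;
}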
" Lite" : "", 2554 (priv->rev >> 8) & 0xff, priv->rev & 0xff, 2555 priv->base, priv->irq0, priv->irq1, txq, rxq); 2556 2557 return 0; 2558 2559 err_deregister_notifier: 2560 unregister_dsa_notifier(&priv->dsa_notifier); 2561 err_deregister_fixed_link: 2562 if (of_phy_is_fixed_link(dn)) 2563 of_phy_deregister_fixed_link(dn); 2564 err_free_netdev: 2565 free_netdev(dev); 2566 return ret; 2567 } 2568 2569 static int bcm_sysport_remove(struct platform_device *pdev) 2570 { 2571 struct net_device *dev = dev_get_drvdata(&pdev->dev); 2572 struct bcm_sysport_priv *priv = netdev_priv(dev); 2573 struct device_node *dn = pdev->dev.of_node; 2574 2575 /* Not much to do, ndo_close has been called 2576 * and we use managed allocations 2577 */ 2578 unregister_dsa_notifier(&priv->dsa_notifier); 2579 unregister_netdev(dev); 2580 if (of_phy_is_fixed_link(dn)) 2581 of_phy_deregister_fixed_link(dn); 2582 free_netdev(dev); 2583 dev_set_drvdata(&pdev->dev, NULL); 2584 2585 return 0; 2586 } 2587 2588 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) 2589 { 2590 struct net_device *ndev = priv->netdev; 2591 unsigned int timeout = 1000; 2592 unsigned int index, i = 0; 2593 u32 reg; 2594 2595 /* Password has already been programmed */ 2596 reg = umac_readl(priv, UMAC_MPD_CTRL); 2597 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) 2598 reg |= MPD_EN; 2599 reg &= ~PSW_EN; 2600 if (priv->wolopts & WAKE_MAGICSECURE) 2601 reg |= PSW_EN; 2602 umac_writel(priv, reg, UMAC_MPD_CTRL); 2603 2604 if (priv->wolopts & WAKE_FILTER) { 2605 /* Turn on ACPI matching to steal packets from RBUF */ 2606 reg = rbuf_readl(priv, RBUF_CONTROL); 2607 if (priv->is_lite) 2608 reg |= RBUF_ACPI_EN_LITE; 2609 else 2610 reg |= RBUF_ACPI_EN; 2611 rbuf_writel(priv, reg, RBUF_CONTROL); 2612 2613 /* Enable RXCHK, active filters and Broadcom tag matching */ 2614 reg = rxchk_readl(priv, RXCHK_CONTROL); 2615 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << 2616 RXCHK_BRCM_TAG_MATCH_SHIFT); 2617 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { 2618 reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i); 2619 i++; 2620 } 2621 reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN; 2622 rxchk_writel(priv, reg, RXCHK_CONTROL); 2623 } 2624 2625 /* Make sure RBUF entered WoL mode as result */ 2626 do { 2627 reg = rbuf_readl(priv, RBUF_STATUS); 2628 if (reg & RBUF_WOL_MODE) 2629 break; 2630 2631 udelay(10); 2632 } while (timeout-- > 0); 2633 2634 /* Do not leave the UniMAC RBUF matching only MPD packets */ 2635 if (!timeout) { 2636 mpd_enable_set(priv, false); 2637 netif_err(priv, wol, ndev, "failed to enter WOL mode\n"); 2638 return -ETIMEDOUT; 2639 } 2640 2641 /* UniMAC receive needs to be turned on */ 2642 umac_enable_set(priv, CMD_RX_EN, 1); 2643 2644 /* Enable the interrupt wake-up source */ 2645 intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG); 2646 2647 netif_dbg(priv, wol, ndev, "entered WOL mode\n"); 2648 2649 return 0; 2650 } 2651 2652 static int __maybe_unused bcm_sysport_suspend(struct device *d) 2653 { 2654 struct net_device *dev = dev_get_drvdata(d); 2655 struct bcm_sysport_priv *priv = netdev_priv(dev); 2656 unsigned int i; 2657 int ret = 0; 2658 u32 reg; 2659 2660 if (!netif_running(dev)) 2661 return 0; 2662 2663 bcm_sysport_netif_stop(dev); 2664 2665 phy_suspend(dev->phydev); 2666 2667 netif_device_detach(dev); 2668 2669 /* Disable UniMAC RX */ 2670 umac_enable_set(priv, CMD_RX_EN, 0); 2671 2672 ret = rdma_enable_set(priv, 0); 2673 if (ret) { 2674 netdev_err(dev, "RDMA timeout!\n"); 2675 return ret; 2676 } 2677 2678 /* Disable RXCHK if 
static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK if it was enabled before suspend */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);
static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver	= {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");