1adfc5217SJeff Kirsher /* 2adfc5217SJeff Kirsher * Driver for BCM963xx builtin Ethernet mac 3adfc5217SJeff Kirsher * 4adfc5217SJeff Kirsher * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> 5adfc5217SJeff Kirsher * 6adfc5217SJeff Kirsher * This program is free software; you can redistribute it and/or modify 7adfc5217SJeff Kirsher * it under the terms of the GNU General Public License as published by 8adfc5217SJeff Kirsher * the Free Software Foundation; either version 2 of the License, or 9adfc5217SJeff Kirsher * (at your option) any later version. 10adfc5217SJeff Kirsher * 11adfc5217SJeff Kirsher * This program is distributed in the hope that it will be useful, 12adfc5217SJeff Kirsher * but WITHOUT ANY WARRANTY; without even the implied warranty of 13adfc5217SJeff Kirsher * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14adfc5217SJeff Kirsher * GNU General Public License for more details. 15adfc5217SJeff Kirsher * 16adfc5217SJeff Kirsher * You should have received a copy of the GNU General Public License 17adfc5217SJeff Kirsher * along with this program; if not, write to the Free Software 18adfc5217SJeff Kirsher * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
19adfc5217SJeff Kirsher */ 20adfc5217SJeff Kirsher #include <linux/init.h> 21adfc5217SJeff Kirsher #include <linux/interrupt.h> 22adfc5217SJeff Kirsher #include <linux/module.h> 23adfc5217SJeff Kirsher #include <linux/clk.h> 24adfc5217SJeff Kirsher #include <linux/etherdevice.h> 25adfc5217SJeff Kirsher #include <linux/slab.h> 26adfc5217SJeff Kirsher #include <linux/delay.h> 27adfc5217SJeff Kirsher #include <linux/ethtool.h> 28adfc5217SJeff Kirsher #include <linux/crc32.h> 29adfc5217SJeff Kirsher #include <linux/err.h> 30adfc5217SJeff Kirsher #include <linux/dma-mapping.h> 31adfc5217SJeff Kirsher #include <linux/platform_device.h> 32adfc5217SJeff Kirsher #include <linux/if_vlan.h> 33adfc5217SJeff Kirsher 34adfc5217SJeff Kirsher #include <bcm63xx_dev_enet.h> 35adfc5217SJeff Kirsher #include "bcm63xx_enet.h" 36adfc5217SJeff Kirsher 37adfc5217SJeff Kirsher static char bcm_enet_driver_name[] = "bcm63xx_enet"; 38adfc5217SJeff Kirsher static char bcm_enet_driver_version[] = "1.0"; 39adfc5217SJeff Kirsher 40adfc5217SJeff Kirsher static int copybreak __read_mostly = 128; 41adfc5217SJeff Kirsher module_param(copybreak, int, 0); 42adfc5217SJeff Kirsher MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 43adfc5217SJeff Kirsher 440ae99b5fSMaxime Bizon /* io registers memory shared between all devices */ 450ae99b5fSMaxime Bizon static void __iomem *bcm_enet_shared_base[3]; 46adfc5217SJeff Kirsher 47adfc5217SJeff Kirsher /* 48adfc5217SJeff Kirsher * io helpers to access mac registers 49adfc5217SJeff Kirsher */ 50adfc5217SJeff Kirsher static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) 51adfc5217SJeff Kirsher { 52adfc5217SJeff Kirsher return bcm_readl(priv->base + off); 53adfc5217SJeff Kirsher } 54adfc5217SJeff Kirsher 55adfc5217SJeff Kirsher static inline void enet_writel(struct bcm_enet_priv *priv, 56adfc5217SJeff Kirsher u32 val, u32 off) 57adfc5217SJeff Kirsher { 58adfc5217SJeff Kirsher bcm_writel(val, priv->base + off); 59adfc5217SJeff Kirsher } 
60adfc5217SJeff Kirsher 61adfc5217SJeff Kirsher /* 626f00a022SMaxime Bizon * io helpers to access switch registers 63adfc5217SJeff Kirsher */ 646f00a022SMaxime Bizon static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off) 656f00a022SMaxime Bizon { 666f00a022SMaxime Bizon return bcm_readl(priv->base + off); 676f00a022SMaxime Bizon } 686f00a022SMaxime Bizon 696f00a022SMaxime Bizon static inline void enetsw_writel(struct bcm_enet_priv *priv, 706f00a022SMaxime Bizon u32 val, u32 off) 716f00a022SMaxime Bizon { 726f00a022SMaxime Bizon bcm_writel(val, priv->base + off); 736f00a022SMaxime Bizon } 746f00a022SMaxime Bizon 756f00a022SMaxime Bizon static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off) 766f00a022SMaxime Bizon { 776f00a022SMaxime Bizon return bcm_readw(priv->base + off); 786f00a022SMaxime Bizon } 796f00a022SMaxime Bizon 806f00a022SMaxime Bizon static inline void enetsw_writew(struct bcm_enet_priv *priv, 816f00a022SMaxime Bizon u16 val, u32 off) 826f00a022SMaxime Bizon { 836f00a022SMaxime Bizon bcm_writew(val, priv->base + off); 846f00a022SMaxime Bizon } 856f00a022SMaxime Bizon 866f00a022SMaxime Bizon static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off) 876f00a022SMaxime Bizon { 886f00a022SMaxime Bizon return bcm_readb(priv->base + off); 896f00a022SMaxime Bizon } 906f00a022SMaxime Bizon 916f00a022SMaxime Bizon static inline void enetsw_writeb(struct bcm_enet_priv *priv, 926f00a022SMaxime Bizon u8 val, u32 off) 936f00a022SMaxime Bizon { 946f00a022SMaxime Bizon bcm_writeb(val, priv->base + off); 956f00a022SMaxime Bizon } 966f00a022SMaxime Bizon 976f00a022SMaxime Bizon 986f00a022SMaxime Bizon /* io helpers to access shared registers */ 99adfc5217SJeff Kirsher static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) 100adfc5217SJeff Kirsher { 1010ae99b5fSMaxime Bizon return bcm_readl(bcm_enet_shared_base[0] + off); 102adfc5217SJeff Kirsher } 103adfc5217SJeff Kirsher 104adfc5217SJeff Kirsher static inline void 
enet_dma_writel(struct bcm_enet_priv *priv, 105adfc5217SJeff Kirsher u32 val, u32 off) 106adfc5217SJeff Kirsher { 1070ae99b5fSMaxime Bizon bcm_writel(val, bcm_enet_shared_base[0] + off); 1080ae99b5fSMaxime Bizon } 1090ae99b5fSMaxime Bizon 1103dc6475cSFlorian Fainelli static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan) 1110ae99b5fSMaxime Bizon { 1123dc6475cSFlorian Fainelli return bcm_readl(bcm_enet_shared_base[1] + 1133dc6475cSFlorian Fainelli bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); 1140ae99b5fSMaxime Bizon } 1150ae99b5fSMaxime Bizon 1160ae99b5fSMaxime Bizon static inline void enet_dmac_writel(struct bcm_enet_priv *priv, 1173dc6475cSFlorian Fainelli u32 val, u32 off, int chan) 1180ae99b5fSMaxime Bizon { 1193dc6475cSFlorian Fainelli bcm_writel(val, bcm_enet_shared_base[1] + 1203dc6475cSFlorian Fainelli bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); 1210ae99b5fSMaxime Bizon } 1220ae99b5fSMaxime Bizon 1233dc6475cSFlorian Fainelli static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan) 1240ae99b5fSMaxime Bizon { 1253dc6475cSFlorian Fainelli return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); 1260ae99b5fSMaxime Bizon } 1270ae99b5fSMaxime Bizon 1280ae99b5fSMaxime Bizon static inline void enet_dmas_writel(struct bcm_enet_priv *priv, 1293dc6475cSFlorian Fainelli u32 val, u32 off, int chan) 1300ae99b5fSMaxime Bizon { 1313dc6475cSFlorian Fainelli bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); 132adfc5217SJeff Kirsher } 133adfc5217SJeff Kirsher 134adfc5217SJeff Kirsher /* 135adfc5217SJeff Kirsher * write given data into mii register and wait for transfer to end 136adfc5217SJeff Kirsher * with timeout (average measured transfer time is 25us) 137adfc5217SJeff Kirsher */ 138adfc5217SJeff Kirsher static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) 139adfc5217SJeff Kirsher { 140adfc5217SJeff Kirsher int limit; 141adfc5217SJeff 
Kirsher 142adfc5217SJeff Kirsher /* make sure mii interrupt status is cleared */ 143adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MII, ENET_IR_REG); 144adfc5217SJeff Kirsher 145adfc5217SJeff Kirsher enet_writel(priv, data, ENET_MIIDATA_REG); 146adfc5217SJeff Kirsher wmb(); 147adfc5217SJeff Kirsher 148adfc5217SJeff Kirsher /* busy wait on mii interrupt bit, with timeout */ 149adfc5217SJeff Kirsher limit = 1000; 150adfc5217SJeff Kirsher do { 151adfc5217SJeff Kirsher if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) 152adfc5217SJeff Kirsher break; 153adfc5217SJeff Kirsher udelay(1); 154adfc5217SJeff Kirsher } while (limit-- > 0); 155adfc5217SJeff Kirsher 156adfc5217SJeff Kirsher return (limit < 0) ? 1 : 0; 157adfc5217SJeff Kirsher } 158adfc5217SJeff Kirsher 159adfc5217SJeff Kirsher /* 160adfc5217SJeff Kirsher * MII internal read callback 161adfc5217SJeff Kirsher */ 162adfc5217SJeff Kirsher static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, 163adfc5217SJeff Kirsher int regnum) 164adfc5217SJeff Kirsher { 165adfc5217SJeff Kirsher u32 tmp, val; 166adfc5217SJeff Kirsher 167adfc5217SJeff Kirsher tmp = regnum << ENET_MIIDATA_REG_SHIFT; 168adfc5217SJeff Kirsher tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; 169adfc5217SJeff Kirsher tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; 170adfc5217SJeff Kirsher tmp |= ENET_MIIDATA_OP_READ_MASK; 171adfc5217SJeff Kirsher 172adfc5217SJeff Kirsher if (do_mdio_op(priv, tmp)) 173adfc5217SJeff Kirsher return -1; 174adfc5217SJeff Kirsher 175adfc5217SJeff Kirsher val = enet_readl(priv, ENET_MIIDATA_REG); 176adfc5217SJeff Kirsher val &= 0xffff; 177adfc5217SJeff Kirsher return val; 178adfc5217SJeff Kirsher } 179adfc5217SJeff Kirsher 180adfc5217SJeff Kirsher /* 181adfc5217SJeff Kirsher * MII internal write callback 182adfc5217SJeff Kirsher */ 183adfc5217SJeff Kirsher static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, 184adfc5217SJeff Kirsher int regnum, u16 value) 185adfc5217SJeff Kirsher { 186adfc5217SJeff Kirsher u32 
tmp; 187adfc5217SJeff Kirsher 188adfc5217SJeff Kirsher tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; 189adfc5217SJeff Kirsher tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; 190adfc5217SJeff Kirsher tmp |= regnum << ENET_MIIDATA_REG_SHIFT; 191adfc5217SJeff Kirsher tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; 192adfc5217SJeff Kirsher tmp |= ENET_MIIDATA_OP_WRITE_MASK; 193adfc5217SJeff Kirsher 194adfc5217SJeff Kirsher (void)do_mdio_op(priv, tmp); 195adfc5217SJeff Kirsher return 0; 196adfc5217SJeff Kirsher } 197adfc5217SJeff Kirsher 198adfc5217SJeff Kirsher /* 199adfc5217SJeff Kirsher * MII read callback from phylib 200adfc5217SJeff Kirsher */ 201adfc5217SJeff Kirsher static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, 202adfc5217SJeff Kirsher int regnum) 203adfc5217SJeff Kirsher { 204adfc5217SJeff Kirsher return bcm_enet_mdio_read(bus->priv, mii_id, regnum); 205adfc5217SJeff Kirsher } 206adfc5217SJeff Kirsher 207adfc5217SJeff Kirsher /* 208adfc5217SJeff Kirsher * MII write callback from phylib 209adfc5217SJeff Kirsher */ 210adfc5217SJeff Kirsher static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, 211adfc5217SJeff Kirsher int regnum, u16 value) 212adfc5217SJeff Kirsher { 213adfc5217SJeff Kirsher return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); 214adfc5217SJeff Kirsher } 215adfc5217SJeff Kirsher 216adfc5217SJeff Kirsher /* 217adfc5217SJeff Kirsher * MII read callback from mii core 218adfc5217SJeff Kirsher */ 219adfc5217SJeff Kirsher static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, 220adfc5217SJeff Kirsher int regnum) 221adfc5217SJeff Kirsher { 222adfc5217SJeff Kirsher return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); 223adfc5217SJeff Kirsher } 224adfc5217SJeff Kirsher 225adfc5217SJeff Kirsher /* 226adfc5217SJeff Kirsher * MII write callback from mii core 227adfc5217SJeff Kirsher */ 228adfc5217SJeff Kirsher static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, 229adfc5217SJeff 
Kirsher int regnum, int value) 230adfc5217SJeff Kirsher { 231adfc5217SJeff Kirsher bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); 232adfc5217SJeff Kirsher } 233adfc5217SJeff Kirsher 234adfc5217SJeff Kirsher /* 235adfc5217SJeff Kirsher * refill rx queue 236adfc5217SJeff Kirsher */ 237adfc5217SJeff Kirsher static int bcm_enet_refill_rx(struct net_device *dev) 238adfc5217SJeff Kirsher { 239adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 240adfc5217SJeff Kirsher 241adfc5217SJeff Kirsher priv = netdev_priv(dev); 242adfc5217SJeff Kirsher 243adfc5217SJeff Kirsher while (priv->rx_desc_count < priv->rx_ring_size) { 244adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 245adfc5217SJeff Kirsher struct sk_buff *skb; 246adfc5217SJeff Kirsher dma_addr_t p; 247adfc5217SJeff Kirsher int desc_idx; 248adfc5217SJeff Kirsher u32 len_stat; 249adfc5217SJeff Kirsher 250adfc5217SJeff Kirsher desc_idx = priv->rx_dirty_desc; 251adfc5217SJeff Kirsher desc = &priv->rx_desc_cpu[desc_idx]; 252adfc5217SJeff Kirsher 253adfc5217SJeff Kirsher if (!priv->rx_skb[desc_idx]) { 254adfc5217SJeff Kirsher skb = netdev_alloc_skb(dev, priv->rx_skb_size); 255adfc5217SJeff Kirsher if (!skb) 256adfc5217SJeff Kirsher break; 257adfc5217SJeff Kirsher priv->rx_skb[desc_idx] = skb; 258adfc5217SJeff Kirsher p = dma_map_single(&priv->pdev->dev, skb->data, 259adfc5217SJeff Kirsher priv->rx_skb_size, 260adfc5217SJeff Kirsher DMA_FROM_DEVICE); 261adfc5217SJeff Kirsher desc->address = p; 262adfc5217SJeff Kirsher } 263adfc5217SJeff Kirsher 264adfc5217SJeff Kirsher len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; 265adfc5217SJeff Kirsher len_stat |= DMADESC_OWNER_MASK; 266adfc5217SJeff Kirsher if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { 2673dc6475cSFlorian Fainelli len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); 268adfc5217SJeff Kirsher priv->rx_dirty_desc = 0; 269adfc5217SJeff Kirsher } else { 270adfc5217SJeff Kirsher priv->rx_dirty_desc++; 271adfc5217SJeff Kirsher } 272adfc5217SJeff 
Kirsher wmb(); 273adfc5217SJeff Kirsher desc->len_stat = len_stat; 274adfc5217SJeff Kirsher 275adfc5217SJeff Kirsher priv->rx_desc_count++; 276adfc5217SJeff Kirsher 277adfc5217SJeff Kirsher /* tell dma engine we allocated one buffer */ 2783dc6475cSFlorian Fainelli if (priv->dma_has_sram) 279adfc5217SJeff Kirsher enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); 2803dc6475cSFlorian Fainelli else 2813dc6475cSFlorian Fainelli enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); 282adfc5217SJeff Kirsher } 283adfc5217SJeff Kirsher 284adfc5217SJeff Kirsher /* If rx ring is still empty, set a timer to try allocating 285adfc5217SJeff Kirsher * again at a later time. */ 286adfc5217SJeff Kirsher if (priv->rx_desc_count == 0 && netif_running(dev)) { 287adfc5217SJeff Kirsher dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); 288adfc5217SJeff Kirsher priv->rx_timeout.expires = jiffies + HZ; 289adfc5217SJeff Kirsher add_timer(&priv->rx_timeout); 290adfc5217SJeff Kirsher } 291adfc5217SJeff Kirsher 292adfc5217SJeff Kirsher return 0; 293adfc5217SJeff Kirsher } 294adfc5217SJeff Kirsher 295adfc5217SJeff Kirsher /* 296adfc5217SJeff Kirsher * timer callback to defer refill rx queue in case we're OOM 297adfc5217SJeff Kirsher */ 298eb8c6b5bSKees Cook static void bcm_enet_refill_rx_timer(struct timer_list *t) 299adfc5217SJeff Kirsher { 300eb8c6b5bSKees Cook struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout); 301eb8c6b5bSKees Cook struct net_device *dev = priv->net_dev; 302adfc5217SJeff Kirsher 303adfc5217SJeff Kirsher spin_lock(&priv->rx_lock); 304eb8c6b5bSKees Cook bcm_enet_refill_rx(dev); 305adfc5217SJeff Kirsher spin_unlock(&priv->rx_lock); 306adfc5217SJeff Kirsher } 307adfc5217SJeff Kirsher 308adfc5217SJeff Kirsher /* 309adfc5217SJeff Kirsher * extract packet from rx queue 310adfc5217SJeff Kirsher */ 311adfc5217SJeff Kirsher static int bcm_enet_receive_queue(struct net_device *dev, int budget) 312adfc5217SJeff Kirsher { 313adfc5217SJeff Kirsher 
struct bcm_enet_priv *priv; 314adfc5217SJeff Kirsher struct device *kdev; 315adfc5217SJeff Kirsher int processed; 316adfc5217SJeff Kirsher 317adfc5217SJeff Kirsher priv = netdev_priv(dev); 318adfc5217SJeff Kirsher kdev = &priv->pdev->dev; 319adfc5217SJeff Kirsher processed = 0; 320adfc5217SJeff Kirsher 321adfc5217SJeff Kirsher /* don't scan ring further than number of refilled 322adfc5217SJeff Kirsher * descriptor */ 323adfc5217SJeff Kirsher if (budget > priv->rx_desc_count) 324adfc5217SJeff Kirsher budget = priv->rx_desc_count; 325adfc5217SJeff Kirsher 326adfc5217SJeff Kirsher do { 327adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 328adfc5217SJeff Kirsher struct sk_buff *skb; 329adfc5217SJeff Kirsher int desc_idx; 330adfc5217SJeff Kirsher u32 len_stat; 331adfc5217SJeff Kirsher unsigned int len; 332adfc5217SJeff Kirsher 333adfc5217SJeff Kirsher desc_idx = priv->rx_curr_desc; 334adfc5217SJeff Kirsher desc = &priv->rx_desc_cpu[desc_idx]; 335adfc5217SJeff Kirsher 336adfc5217SJeff Kirsher /* make sure we actually read the descriptor status at 337adfc5217SJeff Kirsher * each loop */ 338adfc5217SJeff Kirsher rmb(); 339adfc5217SJeff Kirsher 340adfc5217SJeff Kirsher len_stat = desc->len_stat; 341adfc5217SJeff Kirsher 342adfc5217SJeff Kirsher /* break if dma ownership belongs to hw */ 343adfc5217SJeff Kirsher if (len_stat & DMADESC_OWNER_MASK) 344adfc5217SJeff Kirsher break; 345adfc5217SJeff Kirsher 346adfc5217SJeff Kirsher processed++; 347adfc5217SJeff Kirsher priv->rx_curr_desc++; 348adfc5217SJeff Kirsher if (priv->rx_curr_desc == priv->rx_ring_size) 349adfc5217SJeff Kirsher priv->rx_curr_desc = 0; 350adfc5217SJeff Kirsher priv->rx_desc_count--; 351adfc5217SJeff Kirsher 352adfc5217SJeff Kirsher /* if the packet does not have start of packet _and_ 353adfc5217SJeff Kirsher * end of packet flag set, then just recycle it */ 3543dc6475cSFlorian Fainelli if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != 3553dc6475cSFlorian Fainelli (DMADESC_ESOP_MASK >> 
priv->dma_desc_shift)) { 356adfc5217SJeff Kirsher dev->stats.rx_dropped++; 357adfc5217SJeff Kirsher continue; 358adfc5217SJeff Kirsher } 359adfc5217SJeff Kirsher 360adfc5217SJeff Kirsher /* recycle packet if it's marked as bad */ 3616f00a022SMaxime Bizon if (!priv->enet_is_sw && 3626f00a022SMaxime Bizon unlikely(len_stat & DMADESC_ERR_MASK)) { 363adfc5217SJeff Kirsher dev->stats.rx_errors++; 364adfc5217SJeff Kirsher 365adfc5217SJeff Kirsher if (len_stat & DMADESC_OVSIZE_MASK) 366adfc5217SJeff Kirsher dev->stats.rx_length_errors++; 367adfc5217SJeff Kirsher if (len_stat & DMADESC_CRC_MASK) 368adfc5217SJeff Kirsher dev->stats.rx_crc_errors++; 369adfc5217SJeff Kirsher if (len_stat & DMADESC_UNDER_MASK) 370adfc5217SJeff Kirsher dev->stats.rx_frame_errors++; 371adfc5217SJeff Kirsher if (len_stat & DMADESC_OV_MASK) 372adfc5217SJeff Kirsher dev->stats.rx_fifo_errors++; 373adfc5217SJeff Kirsher continue; 374adfc5217SJeff Kirsher } 375adfc5217SJeff Kirsher 376adfc5217SJeff Kirsher /* valid packet */ 377adfc5217SJeff Kirsher skb = priv->rx_skb[desc_idx]; 378adfc5217SJeff Kirsher len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; 379adfc5217SJeff Kirsher /* don't include FCS */ 380adfc5217SJeff Kirsher len -= 4; 381adfc5217SJeff Kirsher 382adfc5217SJeff Kirsher if (len < copybreak) { 383adfc5217SJeff Kirsher struct sk_buff *nskb; 384adfc5217SJeff Kirsher 38545abfb10SAlexander Duyck nskb = napi_alloc_skb(&priv->napi, len); 386adfc5217SJeff Kirsher if (!nskb) { 387adfc5217SJeff Kirsher /* forget packet, just rearm desc */ 388adfc5217SJeff Kirsher dev->stats.rx_dropped++; 389adfc5217SJeff Kirsher continue; 390adfc5217SJeff Kirsher } 391adfc5217SJeff Kirsher 392adfc5217SJeff Kirsher dma_sync_single_for_cpu(kdev, desc->address, 393adfc5217SJeff Kirsher len, DMA_FROM_DEVICE); 394adfc5217SJeff Kirsher memcpy(nskb->data, skb->data, len); 395adfc5217SJeff Kirsher dma_sync_single_for_device(kdev, desc->address, 396adfc5217SJeff Kirsher len, DMA_FROM_DEVICE); 
397adfc5217SJeff Kirsher skb = nskb; 398adfc5217SJeff Kirsher } else { 399adfc5217SJeff Kirsher dma_unmap_single(&priv->pdev->dev, desc->address, 400adfc5217SJeff Kirsher priv->rx_skb_size, DMA_FROM_DEVICE); 401adfc5217SJeff Kirsher priv->rx_skb[desc_idx] = NULL; 402adfc5217SJeff Kirsher } 403adfc5217SJeff Kirsher 404adfc5217SJeff Kirsher skb_put(skb, len); 405adfc5217SJeff Kirsher skb->protocol = eth_type_trans(skb, dev); 406adfc5217SJeff Kirsher dev->stats.rx_packets++; 407adfc5217SJeff Kirsher dev->stats.rx_bytes += len; 408adfc5217SJeff Kirsher netif_receive_skb(skb); 409adfc5217SJeff Kirsher 410adfc5217SJeff Kirsher } while (--budget > 0); 411adfc5217SJeff Kirsher 412adfc5217SJeff Kirsher if (processed || !priv->rx_desc_count) { 413adfc5217SJeff Kirsher bcm_enet_refill_rx(dev); 414adfc5217SJeff Kirsher 415adfc5217SJeff Kirsher /* kick rx dma */ 4163dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_en_mask, 4173dc6475cSFlorian Fainelli ENETDMAC_CHANCFG, priv->rx_chan); 418adfc5217SJeff Kirsher } 419adfc5217SJeff Kirsher 420adfc5217SJeff Kirsher return processed; 421adfc5217SJeff Kirsher } 422adfc5217SJeff Kirsher 423adfc5217SJeff Kirsher 424adfc5217SJeff Kirsher /* 425adfc5217SJeff Kirsher * try to or force reclaim of transmitted buffers 426adfc5217SJeff Kirsher */ 427adfc5217SJeff Kirsher static int bcm_enet_tx_reclaim(struct net_device *dev, int force) 428adfc5217SJeff Kirsher { 429adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 430adfc5217SJeff Kirsher int released; 431adfc5217SJeff Kirsher 432adfc5217SJeff Kirsher priv = netdev_priv(dev); 433adfc5217SJeff Kirsher released = 0; 434adfc5217SJeff Kirsher 435adfc5217SJeff Kirsher while (priv->tx_desc_count < priv->tx_ring_size) { 436adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 437adfc5217SJeff Kirsher struct sk_buff *skb; 438adfc5217SJeff Kirsher 439adfc5217SJeff Kirsher /* We run in a bh and fight against start_xmit, which 440adfc5217SJeff Kirsher * is called with bh disabled */ 
441adfc5217SJeff Kirsher spin_lock(&priv->tx_lock); 442adfc5217SJeff Kirsher 443adfc5217SJeff Kirsher desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; 444adfc5217SJeff Kirsher 445adfc5217SJeff Kirsher if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { 446adfc5217SJeff Kirsher spin_unlock(&priv->tx_lock); 447adfc5217SJeff Kirsher break; 448adfc5217SJeff Kirsher } 449adfc5217SJeff Kirsher 450adfc5217SJeff Kirsher /* ensure other field of the descriptor were not read 451adfc5217SJeff Kirsher * before we checked ownership */ 452adfc5217SJeff Kirsher rmb(); 453adfc5217SJeff Kirsher 454adfc5217SJeff Kirsher skb = priv->tx_skb[priv->tx_dirty_desc]; 455adfc5217SJeff Kirsher priv->tx_skb[priv->tx_dirty_desc] = NULL; 456adfc5217SJeff Kirsher dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, 457adfc5217SJeff Kirsher DMA_TO_DEVICE); 458adfc5217SJeff Kirsher 459adfc5217SJeff Kirsher priv->tx_dirty_desc++; 460adfc5217SJeff Kirsher if (priv->tx_dirty_desc == priv->tx_ring_size) 461adfc5217SJeff Kirsher priv->tx_dirty_desc = 0; 462adfc5217SJeff Kirsher priv->tx_desc_count++; 463adfc5217SJeff Kirsher 464adfc5217SJeff Kirsher spin_unlock(&priv->tx_lock); 465adfc5217SJeff Kirsher 466adfc5217SJeff Kirsher if (desc->len_stat & DMADESC_UNDER_MASK) 467adfc5217SJeff Kirsher dev->stats.tx_errors++; 468adfc5217SJeff Kirsher 469adfc5217SJeff Kirsher dev_kfree_skb(skb); 470adfc5217SJeff Kirsher released++; 471adfc5217SJeff Kirsher } 472adfc5217SJeff Kirsher 473adfc5217SJeff Kirsher if (netif_queue_stopped(dev) && released) 474adfc5217SJeff Kirsher netif_wake_queue(dev); 475adfc5217SJeff Kirsher 476adfc5217SJeff Kirsher return released; 477adfc5217SJeff Kirsher } 478adfc5217SJeff Kirsher 479adfc5217SJeff Kirsher /* 480adfc5217SJeff Kirsher * poll func, called by network core 481adfc5217SJeff Kirsher */ 482adfc5217SJeff Kirsher static int bcm_enet_poll(struct napi_struct *napi, int budget) 483adfc5217SJeff Kirsher { 484adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 
485adfc5217SJeff Kirsher struct net_device *dev; 486cd33ccf5SNicolas Schichan int rx_work_done; 487adfc5217SJeff Kirsher 488adfc5217SJeff Kirsher priv = container_of(napi, struct bcm_enet_priv, napi); 489adfc5217SJeff Kirsher dev = priv->net_dev; 490adfc5217SJeff Kirsher 491adfc5217SJeff Kirsher /* ack interrupts */ 4923dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 4933dc6475cSFlorian Fainelli ENETDMAC_IR, priv->rx_chan); 4943dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 4953dc6475cSFlorian Fainelli ENETDMAC_IR, priv->tx_chan); 496adfc5217SJeff Kirsher 497adfc5217SJeff Kirsher /* reclaim sent skb */ 498cd33ccf5SNicolas Schichan bcm_enet_tx_reclaim(dev, 0); 499adfc5217SJeff Kirsher 500adfc5217SJeff Kirsher spin_lock(&priv->rx_lock); 501adfc5217SJeff Kirsher rx_work_done = bcm_enet_receive_queue(dev, budget); 502adfc5217SJeff Kirsher spin_unlock(&priv->rx_lock); 503adfc5217SJeff Kirsher 504cd33ccf5SNicolas Schichan if (rx_work_done >= budget) { 505cd33ccf5SNicolas Schichan /* rx queue is not yet empty/clean */ 506adfc5217SJeff Kirsher return rx_work_done; 507adfc5217SJeff Kirsher } 508adfc5217SJeff Kirsher 509adfc5217SJeff Kirsher /* no more packet in rx/tx queue, remove device from poll 510adfc5217SJeff Kirsher * queue */ 5116ad20165SEric Dumazet napi_complete_done(napi, rx_work_done); 512adfc5217SJeff Kirsher 513adfc5217SJeff Kirsher /* restore rx/tx interrupt */ 5143dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 5153dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->rx_chan); 5163dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 5173dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->tx_chan); 518adfc5217SJeff Kirsher 519adfc5217SJeff Kirsher return rx_work_done; 520adfc5217SJeff Kirsher } 521adfc5217SJeff Kirsher 522adfc5217SJeff Kirsher /* 523adfc5217SJeff Kirsher * mac interrupt handler 524adfc5217SJeff Kirsher */ 525adfc5217SJeff Kirsher static irqreturn_t 
bcm_enet_isr_mac(int irq, void *dev_id) 526adfc5217SJeff Kirsher { 527adfc5217SJeff Kirsher struct net_device *dev; 528adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 529adfc5217SJeff Kirsher u32 stat; 530adfc5217SJeff Kirsher 531adfc5217SJeff Kirsher dev = dev_id; 532adfc5217SJeff Kirsher priv = netdev_priv(dev); 533adfc5217SJeff Kirsher 534adfc5217SJeff Kirsher stat = enet_readl(priv, ENET_IR_REG); 535adfc5217SJeff Kirsher if (!(stat & ENET_IR_MIB)) 536adfc5217SJeff Kirsher return IRQ_NONE; 537adfc5217SJeff Kirsher 538adfc5217SJeff Kirsher /* clear & mask interrupt */ 539adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 540adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_IRMASK_REG); 541adfc5217SJeff Kirsher 542adfc5217SJeff Kirsher /* read mib registers in workqueue */ 543adfc5217SJeff Kirsher schedule_work(&priv->mib_update_task); 544adfc5217SJeff Kirsher 545adfc5217SJeff Kirsher return IRQ_HANDLED; 546adfc5217SJeff Kirsher } 547adfc5217SJeff Kirsher 548adfc5217SJeff Kirsher /* 549adfc5217SJeff Kirsher * rx/tx dma interrupt handler 550adfc5217SJeff Kirsher */ 551adfc5217SJeff Kirsher static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) 552adfc5217SJeff Kirsher { 553adfc5217SJeff Kirsher struct net_device *dev; 554adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 555adfc5217SJeff Kirsher 556adfc5217SJeff Kirsher dev = dev_id; 557adfc5217SJeff Kirsher priv = netdev_priv(dev); 558adfc5217SJeff Kirsher 559adfc5217SJeff Kirsher /* mask rx/tx interrupts */ 5603dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 5613dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 562adfc5217SJeff Kirsher 563adfc5217SJeff Kirsher napi_schedule(&priv->napi); 564adfc5217SJeff Kirsher 565adfc5217SJeff Kirsher return IRQ_HANDLED; 566adfc5217SJeff Kirsher } 567adfc5217SJeff Kirsher 568adfc5217SJeff Kirsher /* 569adfc5217SJeff Kirsher * tx request callback 570adfc5217SJeff Kirsher */ 571adfc5217SJeff 
Kirsher static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 572adfc5217SJeff Kirsher { 573adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 574adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 575adfc5217SJeff Kirsher u32 len_stat; 576adfc5217SJeff Kirsher int ret; 577adfc5217SJeff Kirsher 578adfc5217SJeff Kirsher priv = netdev_priv(dev); 579adfc5217SJeff Kirsher 580adfc5217SJeff Kirsher /* lock against tx reclaim */ 581adfc5217SJeff Kirsher spin_lock(&priv->tx_lock); 582adfc5217SJeff Kirsher 583adfc5217SJeff Kirsher /* make sure the tx hw queue is not full, should not happen 584adfc5217SJeff Kirsher * since we stop queue before it's the case */ 585adfc5217SJeff Kirsher if (unlikely(!priv->tx_desc_count)) { 586adfc5217SJeff Kirsher netif_stop_queue(dev); 587adfc5217SJeff Kirsher dev_err(&priv->pdev->dev, "xmit called with no tx desc " 588adfc5217SJeff Kirsher "available?\n"); 589adfc5217SJeff Kirsher ret = NETDEV_TX_BUSY; 590adfc5217SJeff Kirsher goto out_unlock; 591adfc5217SJeff Kirsher } 592adfc5217SJeff Kirsher 5936f00a022SMaxime Bizon /* pad small packets sent on a switch device */ 5946f00a022SMaxime Bizon if (priv->enet_is_sw && skb->len < 64) { 5956f00a022SMaxime Bizon int needed = 64 - skb->len; 5966f00a022SMaxime Bizon char *data; 5976f00a022SMaxime Bizon 5986f00a022SMaxime Bizon if (unlikely(skb_tailroom(skb) < needed)) { 5996f00a022SMaxime Bizon struct sk_buff *nskb; 6006f00a022SMaxime Bizon 6016f00a022SMaxime Bizon nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC); 6026f00a022SMaxime Bizon if (!nskb) { 6036f00a022SMaxime Bizon ret = NETDEV_TX_BUSY; 6046f00a022SMaxime Bizon goto out_unlock; 6056f00a022SMaxime Bizon } 6066f00a022SMaxime Bizon dev_kfree_skb(skb); 6076f00a022SMaxime Bizon skb = nskb; 6086f00a022SMaxime Bizon } 609aa9f979cSJohannes Berg data = skb_put_zero(skb, needed); 6106f00a022SMaxime Bizon } 6116f00a022SMaxime Bizon 612adfc5217SJeff Kirsher /* point to the next available desc */ 613adfc5217SJeff Kirsher desc = 
&priv->tx_desc_cpu[priv->tx_curr_desc]; 614adfc5217SJeff Kirsher priv->tx_skb[priv->tx_curr_desc] = skb; 615adfc5217SJeff Kirsher 616adfc5217SJeff Kirsher /* fill descriptor */ 617adfc5217SJeff Kirsher desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, 618adfc5217SJeff Kirsher DMA_TO_DEVICE); 619adfc5217SJeff Kirsher 620adfc5217SJeff Kirsher len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; 6213dc6475cSFlorian Fainelli len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) | 622adfc5217SJeff Kirsher DMADESC_APPEND_CRC | 623adfc5217SJeff Kirsher DMADESC_OWNER_MASK; 624adfc5217SJeff Kirsher 625adfc5217SJeff Kirsher priv->tx_curr_desc++; 626adfc5217SJeff Kirsher if (priv->tx_curr_desc == priv->tx_ring_size) { 627adfc5217SJeff Kirsher priv->tx_curr_desc = 0; 6283dc6475cSFlorian Fainelli len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); 629adfc5217SJeff Kirsher } 630adfc5217SJeff Kirsher priv->tx_desc_count--; 631adfc5217SJeff Kirsher 632adfc5217SJeff Kirsher /* dma might be already polling, make sure we update desc 633adfc5217SJeff Kirsher * fields in correct order */ 634adfc5217SJeff Kirsher wmb(); 635adfc5217SJeff Kirsher desc->len_stat = len_stat; 636adfc5217SJeff Kirsher wmb(); 637adfc5217SJeff Kirsher 638adfc5217SJeff Kirsher /* kick tx dma */ 6393dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_en_mask, 6403dc6475cSFlorian Fainelli ENETDMAC_CHANCFG, priv->tx_chan); 641adfc5217SJeff Kirsher 642adfc5217SJeff Kirsher /* stop queue if no more desc available */ 643adfc5217SJeff Kirsher if (!priv->tx_desc_count) 644adfc5217SJeff Kirsher netif_stop_queue(dev); 645adfc5217SJeff Kirsher 646adfc5217SJeff Kirsher dev->stats.tx_bytes += skb->len; 647adfc5217SJeff Kirsher dev->stats.tx_packets++; 648adfc5217SJeff Kirsher ret = NETDEV_TX_OK; 649adfc5217SJeff Kirsher 650adfc5217SJeff Kirsher out_unlock: 651adfc5217SJeff Kirsher spin_unlock(&priv->tx_lock); 652adfc5217SJeff Kirsher return ret; 653adfc5217SJeff Kirsher } 
/*
 * Change the interface's mac address.
 *
 * Stores the new address in dev->dev_addr and programs it into
 * hardware perfect-match slot 0 (slots 1-3 are reserved for
 * multicast filtering, see bcm_enet_set_multicast_list()).
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	/* low 4 bytes of the address go into the PML register */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	/* high 2 bytes plus the "entry valid" bit go into PMH */
	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 *
 * Programs RXCFG promisc/allmulti bits and loads up to three
 * multicast addresses into perfect-match slots 1-3; overflowing
 * the slots falls back to the all-multicast bit.
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		/* note: i++ here advances the slot for the next
		 * iteration; PML above used the same slot index */
		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	/* clear the remaining unused slots so stale entries stop matching */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}
738adfc5217SJeff Kirsher /* 739adfc5217SJeff Kirsher * set mac duplex parameters 740adfc5217SJeff Kirsher */ 741adfc5217SJeff Kirsher static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex) 742adfc5217SJeff Kirsher { 743adfc5217SJeff Kirsher u32 val; 744adfc5217SJeff Kirsher 745adfc5217SJeff Kirsher val = enet_readl(priv, ENET_TXCTL_REG); 746adfc5217SJeff Kirsher if (fullduplex) 747adfc5217SJeff Kirsher val |= ENET_TXCTL_FD_MASK; 748adfc5217SJeff Kirsher else 749adfc5217SJeff Kirsher val &= ~ENET_TXCTL_FD_MASK; 750adfc5217SJeff Kirsher enet_writel(priv, val, ENET_TXCTL_REG); 751adfc5217SJeff Kirsher } 752adfc5217SJeff Kirsher 753adfc5217SJeff Kirsher /* 754adfc5217SJeff Kirsher * set mac flow control parameters 755adfc5217SJeff Kirsher */ 756adfc5217SJeff Kirsher static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) 757adfc5217SJeff Kirsher { 758adfc5217SJeff Kirsher u32 val; 759adfc5217SJeff Kirsher 760adfc5217SJeff Kirsher /* rx flow control (pause frame handling) */ 761adfc5217SJeff Kirsher val = enet_readl(priv, ENET_RXCFG_REG); 762adfc5217SJeff Kirsher if (rx_en) 763adfc5217SJeff Kirsher val |= ENET_RXCFG_ENFLOW_MASK; 764adfc5217SJeff Kirsher else 765adfc5217SJeff Kirsher val &= ~ENET_RXCFG_ENFLOW_MASK; 766adfc5217SJeff Kirsher enet_writel(priv, val, ENET_RXCFG_REG); 767adfc5217SJeff Kirsher 7683dc6475cSFlorian Fainelli if (!priv->dma_has_sram) 7693dc6475cSFlorian Fainelli return; 7703dc6475cSFlorian Fainelli 771adfc5217SJeff Kirsher /* tx flow control (pause frame generation) */ 772adfc5217SJeff Kirsher val = enet_dma_readl(priv, ENETDMA_CFG_REG); 773adfc5217SJeff Kirsher if (tx_en) 774adfc5217SJeff Kirsher val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); 775adfc5217SJeff Kirsher else 776adfc5217SJeff Kirsher val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); 777adfc5217SJeff Kirsher enet_dma_writel(priv, val, ENETDMA_CFG_REG); 778adfc5217SJeff Kirsher } 779adfc5217SJeff Kirsher 780adfc5217SJeff Kirsher /* 
/*
 * link changed callback (from phylib)
 *
 * Mirrors PHY-negotiated link/duplex/pause state into the MAC and
 * caches the last-seen values in priv so only real changes are
 * applied (and logged).
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = dev->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		/* pr_info without trailing newline: continued by
		 * pr_cont below, terminated by the final "\n" */
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 *
 * No PHY to negotiate with: force duplex/flow-control from the
 * platform-provided settings and report the carrier as up.
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}
/*
 * open callback, allocate dma rings & buffers and start rx operation
 *
 * Sequence: attach PHY (optional) -> mask+request irqs -> program
 * perfect-match/mac address -> allocate rx/tx descriptor rings and
 * skb arrays -> program DMA ring/state registers -> enable mac, dma
 * and interrupts.  Error paths unwind in reverse order via the goto
 * chain at the bottom.
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mii_bus->id, priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phy_support_sym_pause(phydev);
		phy_set_max_speed(phydev, SPEED_100);
		phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
				  priv->pause_auto);

		phy_attached_info(phydev);

		/* force first adjust_phy_link() callback to program
		 * the MAC by making the cached state impossible */
		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
	} else {
		phydev = NULL;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation; register layout
	 * differs between SRAM-based and per-channel DMA blocks */
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMA_BUFALLOC_REG(priv->rx_chan));
	else
		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				 ENETDMAC_BUFALLOC, priv->rx_chan);

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, priv->rx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->rx_chan);
		enet_dmas_writel(priv, priv->tx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, priv->rx_desc_dma,
				 ENETDMAC_RSTART, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_desc_dma,
				 ENETDMAC_RSTART, priv->tx_chan);
	}

	/* clear remaining state ram for rx & tx channel */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
	}

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	if (priv->dma_has_sram) {
		val = priv->rx_ring_size / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
		val = (priv->rx_ring_size * 2) / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	} else {
		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
	}

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	if (phydev)
		phy_start(phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

	/* error unwinding: labels release resources in reverse order
	 * of acquisition */
out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	if (phydev)
		phy_disconnect(phydev);

	return ret;
}
1130adfc5217SJeff Kirsher int limit; 1131adfc5217SJeff Kirsher u32 val; 1132adfc5217SJeff Kirsher 1133adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1134adfc5217SJeff Kirsher val |= ENET_CTL_DISABLE_MASK; 1135adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1136adfc5217SJeff Kirsher 1137adfc5217SJeff Kirsher limit = 1000; 1138adfc5217SJeff Kirsher do { 1139adfc5217SJeff Kirsher u32 val; 1140adfc5217SJeff Kirsher 1141adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1142adfc5217SJeff Kirsher if (!(val & ENET_CTL_DISABLE_MASK)) 1143adfc5217SJeff Kirsher break; 1144adfc5217SJeff Kirsher udelay(1); 1145adfc5217SJeff Kirsher } while (limit--); 1146adfc5217SJeff Kirsher } 1147adfc5217SJeff Kirsher 1148adfc5217SJeff Kirsher /* 1149adfc5217SJeff Kirsher * disable dma in given channel 1150adfc5217SJeff Kirsher */ 1151adfc5217SJeff Kirsher static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) 1152adfc5217SJeff Kirsher { 1153adfc5217SJeff Kirsher int limit; 1154adfc5217SJeff Kirsher 11553dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); 1156adfc5217SJeff Kirsher 1157adfc5217SJeff Kirsher limit = 1000; 1158adfc5217SJeff Kirsher do { 1159adfc5217SJeff Kirsher u32 val; 1160adfc5217SJeff Kirsher 11613dc6475cSFlorian Fainelli val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); 11620ae99b5fSMaxime Bizon if (!(val & ENETDMAC_CHANCFG_EN_MASK)) 1163adfc5217SJeff Kirsher break; 1164adfc5217SJeff Kirsher udelay(1); 1165adfc5217SJeff Kirsher } while (limit--); 1166adfc5217SJeff Kirsher } 1167adfc5217SJeff Kirsher 1168adfc5217SJeff Kirsher /* 1169adfc5217SJeff Kirsher * stop callback 1170adfc5217SJeff Kirsher */ 1171adfc5217SJeff Kirsher static int bcm_enet_stop(struct net_device *dev) 1172adfc5217SJeff Kirsher { 1173adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1174adfc5217SJeff Kirsher struct device *kdev; 1175adfc5217SJeff Kirsher int i; 1176adfc5217SJeff Kirsher 1177adfc5217SJeff Kirsher priv = 
/*
 * stop callback
 *
 * Tears down everything bcm_enet_open() set up, in reverse order:
 * stop queue/napi/phy, mask irqs, flush deferred mib work, disable
 * dma+mac, reclaim tx buffers, free rx skbs, rings, irqs and phy.
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(dev->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy)
		phy_disconnect(dev->phydev);

	return 0;
}

/*
 * ethtool callbacks
 */

/* one ethtool statistic: either a net_device_stats field
 * (mib_reg == -1) or a hardware MIB counter accumulated into
 * priv->mib at stat_offset */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
		     offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped",	GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

/* hw MIB counters not exported via ethtool; still read (and thereby
 * cleared) so the "counters about to overflow" irq is acked */
static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, bcm_enet_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}
ETH_SS_STATS: 1343adfc5217SJeff Kirsher for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1344adfc5217SJeff Kirsher memcpy(data + i * ETH_GSTRING_LEN, 1345adfc5217SJeff Kirsher bcm_enet_gstrings_stats[i].stat_string, 1346adfc5217SJeff Kirsher ETH_GSTRING_LEN); 1347adfc5217SJeff Kirsher } 1348adfc5217SJeff Kirsher break; 1349adfc5217SJeff Kirsher } 1350adfc5217SJeff Kirsher } 1351adfc5217SJeff Kirsher 1352adfc5217SJeff Kirsher static void update_mib_counters(struct bcm_enet_priv *priv) 1353adfc5217SJeff Kirsher { 1354adfc5217SJeff Kirsher int i; 1355adfc5217SJeff Kirsher 1356adfc5217SJeff Kirsher for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1357adfc5217SJeff Kirsher const struct bcm_enet_stats *s; 1358adfc5217SJeff Kirsher u32 val; 1359adfc5217SJeff Kirsher char *p; 1360adfc5217SJeff Kirsher 1361adfc5217SJeff Kirsher s = &bcm_enet_gstrings_stats[i]; 1362adfc5217SJeff Kirsher if (s->mib_reg == -1) 1363adfc5217SJeff Kirsher continue; 1364adfc5217SJeff Kirsher 1365adfc5217SJeff Kirsher val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); 1366adfc5217SJeff Kirsher p = (char *)priv + s->stat_offset; 1367adfc5217SJeff Kirsher 1368adfc5217SJeff Kirsher if (s->sizeof_stat == sizeof(u64)) 1369adfc5217SJeff Kirsher *(u64 *)p += val; 1370adfc5217SJeff Kirsher else 1371adfc5217SJeff Kirsher *(u32 *)p += val; 1372adfc5217SJeff Kirsher } 1373adfc5217SJeff Kirsher 1374adfc5217SJeff Kirsher /* also empty unused mib counters to make sure mib counter 1375adfc5217SJeff Kirsher * overflow interrupt is cleared */ 1376adfc5217SJeff Kirsher for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++) 1377adfc5217SJeff Kirsher (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); 1378adfc5217SJeff Kirsher } 1379adfc5217SJeff Kirsher 1380adfc5217SJeff Kirsher static void bcm_enet_update_mib_counters_defer(struct work_struct *t) 1381adfc5217SJeff Kirsher { 1382adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1383adfc5217SJeff Kirsher 1384adfc5217SJeff Kirsher priv = container_of(t, struct bcm_enet_priv, 
mib_update_task); 1385adfc5217SJeff Kirsher mutex_lock(&priv->mib_update_lock); 1386adfc5217SJeff Kirsher update_mib_counters(priv); 1387adfc5217SJeff Kirsher mutex_unlock(&priv->mib_update_lock); 1388adfc5217SJeff Kirsher 1389adfc5217SJeff Kirsher /* reenable mib interrupt */ 1390adfc5217SJeff Kirsher if (netif_running(priv->net_dev)) 1391adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1392adfc5217SJeff Kirsher } 1393adfc5217SJeff Kirsher 1394adfc5217SJeff Kirsher static void bcm_enet_get_ethtool_stats(struct net_device *netdev, 1395adfc5217SJeff Kirsher struct ethtool_stats *stats, 1396adfc5217SJeff Kirsher u64 *data) 1397adfc5217SJeff Kirsher { 1398adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1399adfc5217SJeff Kirsher int i; 1400adfc5217SJeff Kirsher 1401adfc5217SJeff Kirsher priv = netdev_priv(netdev); 1402adfc5217SJeff Kirsher 1403adfc5217SJeff Kirsher mutex_lock(&priv->mib_update_lock); 1404adfc5217SJeff Kirsher update_mib_counters(priv); 1405adfc5217SJeff Kirsher 1406adfc5217SJeff Kirsher for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1407adfc5217SJeff Kirsher const struct bcm_enet_stats *s; 1408adfc5217SJeff Kirsher char *p; 1409adfc5217SJeff Kirsher 1410adfc5217SJeff Kirsher s = &bcm_enet_gstrings_stats[i]; 1411adfc5217SJeff Kirsher if (s->mib_reg == -1) 1412adfc5217SJeff Kirsher p = (char *)&netdev->stats; 1413adfc5217SJeff Kirsher else 1414adfc5217SJeff Kirsher p = (char *)priv; 1415adfc5217SJeff Kirsher p += s->stat_offset; 1416adfc5217SJeff Kirsher data[i] = (s->sizeof_stat == sizeof(u64)) ? 
1417adfc5217SJeff Kirsher *(u64 *)p : *(u32 *)p; 1418adfc5217SJeff Kirsher } 1419adfc5217SJeff Kirsher mutex_unlock(&priv->mib_update_lock); 1420adfc5217SJeff Kirsher } 1421adfc5217SJeff Kirsher 14227260aac9SMaxime Bizon static int bcm_enet_nway_reset(struct net_device *dev) 14237260aac9SMaxime Bizon { 14247260aac9SMaxime Bizon struct bcm_enet_priv *priv; 14257260aac9SMaxime Bizon 14267260aac9SMaxime Bizon priv = netdev_priv(dev); 142742469bf5SFlorian Fainelli if (priv->has_phy) 14280fa1dfd6SFlorian Fainelli return phy_ethtool_nway_reset(dev); 14297260aac9SMaxime Bizon 14307260aac9SMaxime Bizon return -EOPNOTSUPP; 14317260aac9SMaxime Bizon } 14327260aac9SMaxime Bizon 1433639cfa9eSPhilippe Reynes static int bcm_enet_get_link_ksettings(struct net_device *dev, 1434639cfa9eSPhilippe Reynes struct ethtool_link_ksettings *cmd) 1435adfc5217SJeff Kirsher { 1436adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1437639cfa9eSPhilippe Reynes u32 supported, advertising; 1438adfc5217SJeff Kirsher 1439adfc5217SJeff Kirsher priv = netdev_priv(dev); 1440adfc5217SJeff Kirsher 1441adfc5217SJeff Kirsher if (priv->has_phy) { 1442625eb866SPhilippe Reynes if (!dev->phydev) 1443adfc5217SJeff Kirsher return -ENODEV; 14445514174fSyuval.shaia@oracle.com 14455514174fSyuval.shaia@oracle.com phy_ethtool_ksettings_get(dev->phydev, cmd); 14465514174fSyuval.shaia@oracle.com 14475514174fSyuval.shaia@oracle.com return 0; 1448adfc5217SJeff Kirsher } else { 1449639cfa9eSPhilippe Reynes cmd->base.autoneg = 0; 1450639cfa9eSPhilippe Reynes cmd->base.speed = (priv->force_speed_100) ? 1451639cfa9eSPhilippe Reynes SPEED_100 : SPEED_10; 1452639cfa9eSPhilippe Reynes cmd->base.duplex = (priv->force_duplex_full) ? 
1453adfc5217SJeff Kirsher DUPLEX_FULL : DUPLEX_HALF; 1454639cfa9eSPhilippe Reynes supported = ADVERTISED_10baseT_Half | 1455adfc5217SJeff Kirsher ADVERTISED_10baseT_Full | 1456adfc5217SJeff Kirsher ADVERTISED_100baseT_Half | 1457adfc5217SJeff Kirsher ADVERTISED_100baseT_Full; 1458639cfa9eSPhilippe Reynes advertising = 0; 1459639cfa9eSPhilippe Reynes ethtool_convert_legacy_u32_to_link_mode( 1460639cfa9eSPhilippe Reynes cmd->link_modes.supported, supported); 1461639cfa9eSPhilippe Reynes ethtool_convert_legacy_u32_to_link_mode( 1462639cfa9eSPhilippe Reynes cmd->link_modes.advertising, advertising); 1463639cfa9eSPhilippe Reynes cmd->base.port = PORT_MII; 1464adfc5217SJeff Kirsher } 1465adfc5217SJeff Kirsher return 0; 1466adfc5217SJeff Kirsher } 1467adfc5217SJeff Kirsher 1468639cfa9eSPhilippe Reynes static int bcm_enet_set_link_ksettings(struct net_device *dev, 1469639cfa9eSPhilippe Reynes const struct ethtool_link_ksettings *cmd) 1470adfc5217SJeff Kirsher { 1471adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1472adfc5217SJeff Kirsher 1473adfc5217SJeff Kirsher priv = netdev_priv(dev); 1474adfc5217SJeff Kirsher if (priv->has_phy) { 1475625eb866SPhilippe Reynes if (!dev->phydev) 1476adfc5217SJeff Kirsher return -ENODEV; 1477639cfa9eSPhilippe Reynes return phy_ethtool_ksettings_set(dev->phydev, cmd); 1478adfc5217SJeff Kirsher } else { 1479adfc5217SJeff Kirsher 1480639cfa9eSPhilippe Reynes if (cmd->base.autoneg || 1481639cfa9eSPhilippe Reynes (cmd->base.speed != SPEED_100 && 1482639cfa9eSPhilippe Reynes cmd->base.speed != SPEED_10) || 1483639cfa9eSPhilippe Reynes cmd->base.port != PORT_MII) 1484adfc5217SJeff Kirsher return -EINVAL; 1485adfc5217SJeff Kirsher 1486639cfa9eSPhilippe Reynes priv->force_speed_100 = 1487639cfa9eSPhilippe Reynes (cmd->base.speed == SPEED_100) ? 1 : 0; 1488639cfa9eSPhilippe Reynes priv->force_duplex_full = 1489639cfa9eSPhilippe Reynes (cmd->base.duplex == DUPLEX_FULL) ? 
1 : 0; 1490adfc5217SJeff Kirsher 1491adfc5217SJeff Kirsher if (netif_running(dev)) 1492adfc5217SJeff Kirsher bcm_enet_adjust_link(dev); 1493adfc5217SJeff Kirsher return 0; 1494adfc5217SJeff Kirsher } 1495adfc5217SJeff Kirsher } 1496adfc5217SJeff Kirsher 1497adfc5217SJeff Kirsher static void bcm_enet_get_ringparam(struct net_device *dev, 1498adfc5217SJeff Kirsher struct ethtool_ringparam *ering) 1499adfc5217SJeff Kirsher { 1500adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1501adfc5217SJeff Kirsher 1502adfc5217SJeff Kirsher priv = netdev_priv(dev); 1503adfc5217SJeff Kirsher 1504adfc5217SJeff Kirsher /* rx/tx ring is actually only limited by memory */ 1505adfc5217SJeff Kirsher ering->rx_max_pending = 8192; 1506adfc5217SJeff Kirsher ering->tx_max_pending = 8192; 1507adfc5217SJeff Kirsher ering->rx_pending = priv->rx_ring_size; 1508adfc5217SJeff Kirsher ering->tx_pending = priv->tx_ring_size; 1509adfc5217SJeff Kirsher } 1510adfc5217SJeff Kirsher 1511adfc5217SJeff Kirsher static int bcm_enet_set_ringparam(struct net_device *dev, 1512adfc5217SJeff Kirsher struct ethtool_ringparam *ering) 1513adfc5217SJeff Kirsher { 1514adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1515adfc5217SJeff Kirsher int was_running; 1516adfc5217SJeff Kirsher 1517adfc5217SJeff Kirsher priv = netdev_priv(dev); 1518adfc5217SJeff Kirsher 1519adfc5217SJeff Kirsher was_running = 0; 1520adfc5217SJeff Kirsher if (netif_running(dev)) { 1521adfc5217SJeff Kirsher bcm_enet_stop(dev); 1522adfc5217SJeff Kirsher was_running = 1; 1523adfc5217SJeff Kirsher } 1524adfc5217SJeff Kirsher 1525adfc5217SJeff Kirsher priv->rx_ring_size = ering->rx_pending; 1526adfc5217SJeff Kirsher priv->tx_ring_size = ering->tx_pending; 1527adfc5217SJeff Kirsher 1528adfc5217SJeff Kirsher if (was_running) { 1529adfc5217SJeff Kirsher int err; 1530adfc5217SJeff Kirsher 1531adfc5217SJeff Kirsher err = bcm_enet_open(dev); 1532adfc5217SJeff Kirsher if (err) 1533adfc5217SJeff Kirsher dev_close(dev); 1534adfc5217SJeff Kirsher else 
1535adfc5217SJeff Kirsher bcm_enet_set_multicast_list(dev); 1536adfc5217SJeff Kirsher } 1537adfc5217SJeff Kirsher return 0; 1538adfc5217SJeff Kirsher } 1539adfc5217SJeff Kirsher 1540adfc5217SJeff Kirsher static void bcm_enet_get_pauseparam(struct net_device *dev, 1541adfc5217SJeff Kirsher struct ethtool_pauseparam *ecmd) 1542adfc5217SJeff Kirsher { 1543adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1544adfc5217SJeff Kirsher 1545adfc5217SJeff Kirsher priv = netdev_priv(dev); 1546adfc5217SJeff Kirsher ecmd->autoneg = priv->pause_auto; 1547adfc5217SJeff Kirsher ecmd->rx_pause = priv->pause_rx; 1548adfc5217SJeff Kirsher ecmd->tx_pause = priv->pause_tx; 1549adfc5217SJeff Kirsher } 1550adfc5217SJeff Kirsher 1551adfc5217SJeff Kirsher static int bcm_enet_set_pauseparam(struct net_device *dev, 1552adfc5217SJeff Kirsher struct ethtool_pauseparam *ecmd) 1553adfc5217SJeff Kirsher { 1554adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1555adfc5217SJeff Kirsher 1556adfc5217SJeff Kirsher priv = netdev_priv(dev); 1557adfc5217SJeff Kirsher 1558adfc5217SJeff Kirsher if (priv->has_phy) { 1559adfc5217SJeff Kirsher if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { 1560adfc5217SJeff Kirsher /* asymetric pause mode not supported, 1561adfc5217SJeff Kirsher * actually possible but integrated PHY has RO 1562adfc5217SJeff Kirsher * asym_pause bit */ 1563adfc5217SJeff Kirsher return -EINVAL; 1564adfc5217SJeff Kirsher } 1565adfc5217SJeff Kirsher } else { 1566adfc5217SJeff Kirsher /* no pause autoneg on direct mii connection */ 1567adfc5217SJeff Kirsher if (ecmd->autoneg) 1568adfc5217SJeff Kirsher return -EINVAL; 1569adfc5217SJeff Kirsher } 1570adfc5217SJeff Kirsher 1571adfc5217SJeff Kirsher priv->pause_auto = ecmd->autoneg; 1572adfc5217SJeff Kirsher priv->pause_rx = ecmd->rx_pause; 1573adfc5217SJeff Kirsher priv->pause_tx = ecmd->tx_pause; 1574adfc5217SJeff Kirsher 1575adfc5217SJeff Kirsher return 0; 1576adfc5217SJeff Kirsher } 1577adfc5217SJeff Kirsher 15781aff0cbeSstephen 
hemminger static const struct ethtool_ops bcm_enet_ethtool_ops = { 1579adfc5217SJeff Kirsher .get_strings = bcm_enet_get_strings, 1580adfc5217SJeff Kirsher .get_sset_count = bcm_enet_get_sset_count, 1581adfc5217SJeff Kirsher .get_ethtool_stats = bcm_enet_get_ethtool_stats, 15827260aac9SMaxime Bizon .nway_reset = bcm_enet_nway_reset, 1583adfc5217SJeff Kirsher .get_drvinfo = bcm_enet_get_drvinfo, 1584adfc5217SJeff Kirsher .get_link = ethtool_op_get_link, 1585adfc5217SJeff Kirsher .get_ringparam = bcm_enet_get_ringparam, 1586adfc5217SJeff Kirsher .set_ringparam = bcm_enet_set_ringparam, 1587adfc5217SJeff Kirsher .get_pauseparam = bcm_enet_get_pauseparam, 1588adfc5217SJeff Kirsher .set_pauseparam = bcm_enet_set_pauseparam, 1589639cfa9eSPhilippe Reynes .get_link_ksettings = bcm_enet_get_link_ksettings, 1590639cfa9eSPhilippe Reynes .set_link_ksettings = bcm_enet_set_link_ksettings, 1591adfc5217SJeff Kirsher }; 1592adfc5217SJeff Kirsher 1593adfc5217SJeff Kirsher static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1594adfc5217SJeff Kirsher { 1595adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1596adfc5217SJeff Kirsher 1597adfc5217SJeff Kirsher priv = netdev_priv(dev); 1598adfc5217SJeff Kirsher if (priv->has_phy) { 1599625eb866SPhilippe Reynes if (!dev->phydev) 1600adfc5217SJeff Kirsher return -ENODEV; 1601625eb866SPhilippe Reynes return phy_mii_ioctl(dev->phydev, rq, cmd); 1602adfc5217SJeff Kirsher } else { 1603adfc5217SJeff Kirsher struct mii_if_info mii; 1604adfc5217SJeff Kirsher 1605adfc5217SJeff Kirsher mii.dev = dev; 1606adfc5217SJeff Kirsher mii.mdio_read = bcm_enet_mdio_read_mii; 1607adfc5217SJeff Kirsher mii.mdio_write = bcm_enet_mdio_write_mii; 1608adfc5217SJeff Kirsher mii.phy_id = 0; 1609adfc5217SJeff Kirsher mii.phy_id_mask = 0x3f; 1610adfc5217SJeff Kirsher mii.reg_num_mask = 0x1f; 1611adfc5217SJeff Kirsher return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); 1612adfc5217SJeff Kirsher } 1613adfc5217SJeff Kirsher } 1614adfc5217SJeff 
Kirsher 1615adfc5217SJeff Kirsher /* 1616e1c6dccaSJarod Wilson * adjust mtu, can't be called while device is running 1617adfc5217SJeff Kirsher */ 1618e1c6dccaSJarod Wilson static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) 1619adfc5217SJeff Kirsher { 1620e1c6dccaSJarod Wilson struct bcm_enet_priv *priv = netdev_priv(dev); 1621e1c6dccaSJarod Wilson int actual_mtu = new_mtu; 1622adfc5217SJeff Kirsher 1623e1c6dccaSJarod Wilson if (netif_running(dev)) 1624e1c6dccaSJarod Wilson return -EBUSY; 1625adfc5217SJeff Kirsher 1626adfc5217SJeff Kirsher /* add ethernet header + vlan tag size */ 1627adfc5217SJeff Kirsher actual_mtu += VLAN_ETH_HLEN; 1628adfc5217SJeff Kirsher 1629adfc5217SJeff Kirsher /* 1630adfc5217SJeff Kirsher * setup maximum size before we get overflow mark in 1631adfc5217SJeff Kirsher * descriptor, note that this will not prevent reception of 1632adfc5217SJeff Kirsher * big frames, they will be split into multiple buffers 1633adfc5217SJeff Kirsher * anyway 1634adfc5217SJeff Kirsher */ 1635adfc5217SJeff Kirsher priv->hw_mtu = actual_mtu; 1636adfc5217SJeff Kirsher 1637adfc5217SJeff Kirsher /* 1638adfc5217SJeff Kirsher * align rx buffer size to dma burst len, account FCS since 1639adfc5217SJeff Kirsher * it's appended 1640adfc5217SJeff Kirsher */ 1641adfc5217SJeff Kirsher priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, 16426f00a022SMaxime Bizon priv->dma_maxburst * 4); 1643adfc5217SJeff Kirsher 1644adfc5217SJeff Kirsher dev->mtu = new_mtu; 1645adfc5217SJeff Kirsher return 0; 1646adfc5217SJeff Kirsher } 1647adfc5217SJeff Kirsher 1648adfc5217SJeff Kirsher /* 1649adfc5217SJeff Kirsher * preinit hardware to allow mii operation while device is down 1650adfc5217SJeff Kirsher */ 1651adfc5217SJeff Kirsher static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) 1652adfc5217SJeff Kirsher { 1653adfc5217SJeff Kirsher u32 val; 1654adfc5217SJeff Kirsher int limit; 1655adfc5217SJeff Kirsher 1656adfc5217SJeff Kirsher /* make sure mac is disabled */ 
1657adfc5217SJeff Kirsher bcm_enet_disable_mac(priv); 1658adfc5217SJeff Kirsher 1659adfc5217SJeff Kirsher /* soft reset mac */ 1660adfc5217SJeff Kirsher val = ENET_CTL_SRESET_MASK; 1661adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1662adfc5217SJeff Kirsher wmb(); 1663adfc5217SJeff Kirsher 1664adfc5217SJeff Kirsher limit = 1000; 1665adfc5217SJeff Kirsher do { 1666adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1667adfc5217SJeff Kirsher if (!(val & ENET_CTL_SRESET_MASK)) 1668adfc5217SJeff Kirsher break; 1669adfc5217SJeff Kirsher udelay(1); 1670adfc5217SJeff Kirsher } while (limit--); 1671adfc5217SJeff Kirsher 1672adfc5217SJeff Kirsher /* select correct mii interface */ 1673adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1674adfc5217SJeff Kirsher if (priv->use_external_mii) 1675adfc5217SJeff Kirsher val |= ENET_CTL_EPHYSEL_MASK; 1676adfc5217SJeff Kirsher else 1677adfc5217SJeff Kirsher val &= ~ENET_CTL_EPHYSEL_MASK; 1678adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1679adfc5217SJeff Kirsher 1680adfc5217SJeff Kirsher /* turn on mdc clock */ 1681adfc5217SJeff Kirsher enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | 1682adfc5217SJeff Kirsher ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); 1683adfc5217SJeff Kirsher 1684adfc5217SJeff Kirsher /* set mib counters to self-clear when read */ 1685adfc5217SJeff Kirsher val = enet_readl(priv, ENET_MIBCTL_REG); 1686adfc5217SJeff Kirsher val |= ENET_MIBCTL_RDCLEAR_MASK; 1687adfc5217SJeff Kirsher enet_writel(priv, val, ENET_MIBCTL_REG); 1688adfc5217SJeff Kirsher } 1689adfc5217SJeff Kirsher 1690adfc5217SJeff Kirsher static const struct net_device_ops bcm_enet_ops = { 1691adfc5217SJeff Kirsher .ndo_open = bcm_enet_open, 1692adfc5217SJeff Kirsher .ndo_stop = bcm_enet_stop, 1693adfc5217SJeff Kirsher .ndo_start_xmit = bcm_enet_start_xmit, 1694adfc5217SJeff Kirsher .ndo_set_mac_address = bcm_enet_set_mac_address, 1695afc4b13dSJiri Pirko .ndo_set_rx_mode = 
bcm_enet_set_multicast_list, 1696adfc5217SJeff Kirsher .ndo_do_ioctl = bcm_enet_ioctl, 1697adfc5217SJeff Kirsher .ndo_change_mtu = bcm_enet_change_mtu, 1698adfc5217SJeff Kirsher }; 1699adfc5217SJeff Kirsher 1700adfc5217SJeff Kirsher /* 1701adfc5217SJeff Kirsher * allocate netdevice, request register memory and register device. 1702adfc5217SJeff Kirsher */ 1703047fc566SBill Pemberton static int bcm_enet_probe(struct platform_device *pdev) 1704adfc5217SJeff Kirsher { 1705adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1706adfc5217SJeff Kirsher struct net_device *dev; 1707adfc5217SJeff Kirsher struct bcm63xx_enet_platform_data *pd; 1708adfc5217SJeff Kirsher struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; 1709adfc5217SJeff Kirsher struct mii_bus *bus; 1710adfc5217SJeff Kirsher int i, ret; 1711adfc5217SJeff Kirsher 17120ae99b5fSMaxime Bizon if (!bcm_enet_shared_base[0]) 1713527a4871SJonas Gorski return -EPROBE_DEFER; 1714adfc5217SJeff Kirsher 1715adfc5217SJeff Kirsher res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1716adfc5217SJeff Kirsher res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 1717adfc5217SJeff Kirsher res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); 1718f607e059SJulia Lawall if (!res_irq || !res_irq_rx || !res_irq_tx) 1719adfc5217SJeff Kirsher return -ENODEV; 1720adfc5217SJeff Kirsher 1721adfc5217SJeff Kirsher ret = 0; 1722adfc5217SJeff Kirsher dev = alloc_etherdev(sizeof(*priv)); 1723adfc5217SJeff Kirsher if (!dev) 1724adfc5217SJeff Kirsher return -ENOMEM; 1725adfc5217SJeff Kirsher priv = netdev_priv(dev); 1726adfc5217SJeff Kirsher 17276f00a022SMaxime Bizon priv->enet_is_sw = false; 17286f00a022SMaxime Bizon priv->dma_maxburst = BCMENET_DMA_MAXBURST; 17296f00a022SMaxime Bizon 1730e1c6dccaSJarod Wilson ret = bcm_enet_change_mtu(dev, dev->mtu); 1731adfc5217SJeff Kirsher if (ret) 1732adfc5217SJeff Kirsher goto out; 1733adfc5217SJeff Kirsher 1734f607e059SJulia Lawall res_mem = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); 1735f607e059SJulia Lawall priv->base = devm_ioremap_resource(&pdev->dev, res_mem); 1736f607e059SJulia Lawall if (IS_ERR(priv->base)) { 1737f607e059SJulia Lawall ret = PTR_ERR(priv->base); 1738adfc5217SJeff Kirsher goto out; 1739adfc5217SJeff Kirsher } 1740adfc5217SJeff Kirsher 1741adfc5217SJeff Kirsher dev->irq = priv->irq = res_irq->start; 1742adfc5217SJeff Kirsher priv->irq_rx = res_irq_rx->start; 1743adfc5217SJeff Kirsher priv->irq_tx = res_irq_tx->start; 1744adfc5217SJeff Kirsher 174575550015SJonas Gorski priv->mac_clk = devm_clk_get(&pdev->dev, "enet"); 1746adfc5217SJeff Kirsher if (IS_ERR(priv->mac_clk)) { 1747adfc5217SJeff Kirsher ret = PTR_ERR(priv->mac_clk); 17481c03da05SJonas Gorski goto out; 1749adfc5217SJeff Kirsher } 17509c86b846SJonas Gorski ret = clk_prepare_enable(priv->mac_clk); 17519c86b846SJonas Gorski if (ret) 17527e697ce9SJonas Gorski goto out; 1753adfc5217SJeff Kirsher 1754adfc5217SJeff Kirsher /* initialize default and fetch platform data */ 1755adfc5217SJeff Kirsher priv->rx_ring_size = BCMENET_DEF_RX_DESC; 1756adfc5217SJeff Kirsher priv->tx_ring_size = BCMENET_DEF_TX_DESC; 1757adfc5217SJeff Kirsher 1758cf0e7794SJingoo Han pd = dev_get_platdata(&pdev->dev); 1759adfc5217SJeff Kirsher if (pd) { 1760adfc5217SJeff Kirsher memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 1761adfc5217SJeff Kirsher priv->has_phy = pd->has_phy; 1762adfc5217SJeff Kirsher priv->phy_id = pd->phy_id; 1763adfc5217SJeff Kirsher priv->has_phy_interrupt = pd->has_phy_interrupt; 1764adfc5217SJeff Kirsher priv->phy_interrupt = pd->phy_interrupt; 1765adfc5217SJeff Kirsher priv->use_external_mii = !pd->use_internal_phy; 1766adfc5217SJeff Kirsher priv->pause_auto = pd->pause_auto; 1767adfc5217SJeff Kirsher priv->pause_rx = pd->pause_rx; 1768adfc5217SJeff Kirsher priv->pause_tx = pd->pause_tx; 1769adfc5217SJeff Kirsher priv->force_duplex_full = pd->force_duplex_full; 1770adfc5217SJeff Kirsher priv->force_speed_100 = pd->force_speed_100; 17713dc6475cSFlorian 
Fainelli priv->dma_chan_en_mask = pd->dma_chan_en_mask; 17723dc6475cSFlorian Fainelli priv->dma_chan_int_mask = pd->dma_chan_int_mask; 17733dc6475cSFlorian Fainelli priv->dma_chan_width = pd->dma_chan_width; 17743dc6475cSFlorian Fainelli priv->dma_has_sram = pd->dma_has_sram; 17753dc6475cSFlorian Fainelli priv->dma_desc_shift = pd->dma_desc_shift; 17761942e482SJonas Gorski priv->rx_chan = pd->rx_chan; 17771942e482SJonas Gorski priv->tx_chan = pd->tx_chan; 1778adfc5217SJeff Kirsher } 1779adfc5217SJeff Kirsher 1780bbd62d24SJonas Gorski if (priv->has_phy && !priv->use_external_mii) { 1781adfc5217SJeff Kirsher /* using internal PHY, enable clock */ 17827e697ce9SJonas Gorski priv->phy_clk = devm_clk_get(&pdev->dev, "ephy"); 1783adfc5217SJeff Kirsher if (IS_ERR(priv->phy_clk)) { 1784adfc5217SJeff Kirsher ret = PTR_ERR(priv->phy_clk); 1785adfc5217SJeff Kirsher priv->phy_clk = NULL; 17869c86b846SJonas Gorski goto out_disable_clk_mac; 1787adfc5217SJeff Kirsher } 17889c86b846SJonas Gorski ret = clk_prepare_enable(priv->phy_clk); 17899c86b846SJonas Gorski if (ret) 17907e697ce9SJonas Gorski goto out_disable_clk_mac; 1791adfc5217SJeff Kirsher } 1792adfc5217SJeff Kirsher 1793adfc5217SJeff Kirsher /* do minimal hardware init to be able to probe mii bus */ 1794adfc5217SJeff Kirsher bcm_enet_hw_preinit(priv); 1795adfc5217SJeff Kirsher 1796adfc5217SJeff Kirsher /* MII bus registration */ 1797adfc5217SJeff Kirsher if (priv->has_phy) { 1798adfc5217SJeff Kirsher 1799adfc5217SJeff Kirsher priv->mii_bus = mdiobus_alloc(); 1800adfc5217SJeff Kirsher if (!priv->mii_bus) { 1801adfc5217SJeff Kirsher ret = -ENOMEM; 1802adfc5217SJeff Kirsher goto out_uninit_hw; 1803adfc5217SJeff Kirsher } 1804adfc5217SJeff Kirsher 1805adfc5217SJeff Kirsher bus = priv->mii_bus; 1806adfc5217SJeff Kirsher bus->name = "bcm63xx_enet MII bus"; 1807adfc5217SJeff Kirsher bus->parent = &pdev->dev; 1808adfc5217SJeff Kirsher bus->priv = priv; 1809adfc5217SJeff Kirsher bus->read = bcm_enet_mdio_read_phylib; 
1810adfc5217SJeff Kirsher bus->write = bcm_enet_mdio_write_phylib; 1811c7fe89e3SJonas Gorski sprintf(bus->id, "%s-%d", pdev->name, pdev->id); 1812adfc5217SJeff Kirsher 1813adfc5217SJeff Kirsher /* only probe bus where we think the PHY is, because 1814adfc5217SJeff Kirsher * the mdio read operation return 0 instead of 0xffff 1815adfc5217SJeff Kirsher * if a slave is not present on hw */ 1816adfc5217SJeff Kirsher bus->phy_mask = ~(1 << priv->phy_id); 1817adfc5217SJeff Kirsher 1818adfc5217SJeff Kirsher if (priv->has_phy_interrupt) 1819adfc5217SJeff Kirsher bus->irq[priv->phy_id] = priv->phy_interrupt; 1820adfc5217SJeff Kirsher 1821adfc5217SJeff Kirsher ret = mdiobus_register(bus); 1822adfc5217SJeff Kirsher if (ret) { 1823adfc5217SJeff Kirsher dev_err(&pdev->dev, "unable to register mdio bus\n"); 1824adfc5217SJeff Kirsher goto out_free_mdio; 1825adfc5217SJeff Kirsher } 1826adfc5217SJeff Kirsher } else { 1827adfc5217SJeff Kirsher 1828adfc5217SJeff Kirsher /* run platform code to initialize PHY device */ 1829323b15b9Sxypron.glpk@gmx.de if (pd && pd->mii_config && 1830adfc5217SJeff Kirsher pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, 1831adfc5217SJeff Kirsher bcm_enet_mdio_write_mii)) { 1832adfc5217SJeff Kirsher dev_err(&pdev->dev, "unable to configure mdio bus\n"); 1833adfc5217SJeff Kirsher goto out_uninit_hw; 1834adfc5217SJeff Kirsher } 1835adfc5217SJeff Kirsher } 1836adfc5217SJeff Kirsher 1837adfc5217SJeff Kirsher spin_lock_init(&priv->rx_lock); 1838adfc5217SJeff Kirsher 1839adfc5217SJeff Kirsher /* init rx timeout (used for oom) */ 1840eb8c6b5bSKees Cook timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); 1841adfc5217SJeff Kirsher 1842adfc5217SJeff Kirsher /* init the mib update lock&work */ 1843adfc5217SJeff Kirsher mutex_init(&priv->mib_update_lock); 1844adfc5217SJeff Kirsher INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); 1845adfc5217SJeff Kirsher 1846adfc5217SJeff Kirsher /* zero mib counters */ 1847adfc5217SJeff Kirsher for (i = 
0; i < ENET_MIB_REG_COUNT; i++) 1848adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_MIB_REG(i)); 1849adfc5217SJeff Kirsher 1850adfc5217SJeff Kirsher /* register netdevice */ 1851adfc5217SJeff Kirsher dev->netdev_ops = &bcm_enet_ops; 1852adfc5217SJeff Kirsher netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 1853adfc5217SJeff Kirsher 18547ad24ea4SWilfried Klaebe dev->ethtool_ops = &bcm_enet_ethtool_ops; 1855e1c6dccaSJarod Wilson /* MTU range: 46 - 2028 */ 1856e1c6dccaSJarod Wilson dev->min_mtu = ETH_ZLEN - ETH_HLEN; 1857e1c6dccaSJarod Wilson dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN; 1858adfc5217SJeff Kirsher SET_NETDEV_DEV(dev, &pdev->dev); 1859adfc5217SJeff Kirsher 1860adfc5217SJeff Kirsher ret = register_netdev(dev); 1861adfc5217SJeff Kirsher if (ret) 1862adfc5217SJeff Kirsher goto out_unregister_mdio; 1863adfc5217SJeff Kirsher 1864adfc5217SJeff Kirsher netif_carrier_off(dev); 1865adfc5217SJeff Kirsher platform_set_drvdata(pdev, dev); 1866adfc5217SJeff Kirsher priv->pdev = pdev; 1867adfc5217SJeff Kirsher priv->net_dev = dev; 1868adfc5217SJeff Kirsher 1869adfc5217SJeff Kirsher return 0; 1870adfc5217SJeff Kirsher 1871adfc5217SJeff Kirsher out_unregister_mdio: 18722a80b5e1SJonas Gorski if (priv->mii_bus) 1873adfc5217SJeff Kirsher mdiobus_unregister(priv->mii_bus); 1874adfc5217SJeff Kirsher 1875adfc5217SJeff Kirsher out_free_mdio: 1876adfc5217SJeff Kirsher if (priv->mii_bus) 1877adfc5217SJeff Kirsher mdiobus_free(priv->mii_bus); 1878adfc5217SJeff Kirsher 1879adfc5217SJeff Kirsher out_uninit_hw: 1880adfc5217SJeff Kirsher /* turn off mdc clock */ 1881adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_MIISC_REG); 1882624e2d21SJonas Gorski clk_disable_unprepare(priv->phy_clk); 1883adfc5217SJeff Kirsher 18849c86b846SJonas Gorski out_disable_clk_mac: 1885624e2d21SJonas Gorski clk_disable_unprepare(priv->mac_clk); 1886adfc5217SJeff Kirsher out: 1887adfc5217SJeff Kirsher free_netdev(dev); 1888adfc5217SJeff Kirsher return ret; 1889adfc5217SJeff Kirsher } 
1890adfc5217SJeff Kirsher 1891adfc5217SJeff Kirsher 1892adfc5217SJeff Kirsher /* 1893adfc5217SJeff Kirsher * exit func, stops hardware and unregisters netdevice 1894adfc5217SJeff Kirsher */ 1895047fc566SBill Pemberton static int bcm_enet_remove(struct platform_device *pdev) 1896adfc5217SJeff Kirsher { 1897adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1898adfc5217SJeff Kirsher struct net_device *dev; 1899adfc5217SJeff Kirsher 1900adfc5217SJeff Kirsher /* stop netdevice */ 1901adfc5217SJeff Kirsher dev = platform_get_drvdata(pdev); 1902adfc5217SJeff Kirsher priv = netdev_priv(dev); 1903adfc5217SJeff Kirsher unregister_netdev(dev); 1904adfc5217SJeff Kirsher 1905adfc5217SJeff Kirsher /* turn off mdc clock */ 1906adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_MIISC_REG); 1907adfc5217SJeff Kirsher 1908adfc5217SJeff Kirsher if (priv->has_phy) { 1909adfc5217SJeff Kirsher mdiobus_unregister(priv->mii_bus); 1910adfc5217SJeff Kirsher mdiobus_free(priv->mii_bus); 1911adfc5217SJeff Kirsher } else { 1912adfc5217SJeff Kirsher struct bcm63xx_enet_platform_data *pd; 1913adfc5217SJeff Kirsher 1914cf0e7794SJingoo Han pd = dev_get_platdata(&pdev->dev); 1915adfc5217SJeff Kirsher if (pd && pd->mii_config) 1916adfc5217SJeff Kirsher pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, 1917adfc5217SJeff Kirsher bcm_enet_mdio_write_mii); 1918adfc5217SJeff Kirsher } 1919adfc5217SJeff Kirsher 1920adfc5217SJeff Kirsher /* disable hw block clocks */ 1921624e2d21SJonas Gorski clk_disable_unprepare(priv->phy_clk); 1922624e2d21SJonas Gorski clk_disable_unprepare(priv->mac_clk); 1923adfc5217SJeff Kirsher 1924adfc5217SJeff Kirsher free_netdev(dev); 1925adfc5217SJeff Kirsher return 0; 1926adfc5217SJeff Kirsher } 1927adfc5217SJeff Kirsher 1928adfc5217SJeff Kirsher struct platform_driver bcm63xx_enet_driver = { 1929adfc5217SJeff Kirsher .probe = bcm_enet_probe, 1930047fc566SBill Pemberton .remove = bcm_enet_remove, 1931adfc5217SJeff Kirsher .driver = { 1932adfc5217SJeff Kirsher .name = "bcm63xx_enet", 
1933adfc5217SJeff Kirsher .owner = THIS_MODULE, 1934adfc5217SJeff Kirsher }, 1935adfc5217SJeff Kirsher }; 1936adfc5217SJeff Kirsher 1937adfc5217SJeff Kirsher /* 19386f00a022SMaxime Bizon * switch mii access callbacks 1939adfc5217SJeff Kirsher */ 19406f00a022SMaxime Bizon static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv, 19416f00a022SMaxime Bizon int ext, int phy_id, int location) 19426f00a022SMaxime Bizon { 19436f00a022SMaxime Bizon u32 reg; 19446f00a022SMaxime Bizon int ret; 19456f00a022SMaxime Bizon 19466f00a022SMaxime Bizon spin_lock_bh(&priv->enetsw_mdio_lock); 19476f00a022SMaxime Bizon enetsw_writel(priv, 0, ENETSW_MDIOC_REG); 19486f00a022SMaxime Bizon 19496f00a022SMaxime Bizon reg = ENETSW_MDIOC_RD_MASK | 19506f00a022SMaxime Bizon (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | 19516f00a022SMaxime Bizon (location << ENETSW_MDIOC_REG_SHIFT); 19526f00a022SMaxime Bizon 19536f00a022SMaxime Bizon if (ext) 19546f00a022SMaxime Bizon reg |= ENETSW_MDIOC_EXT_MASK; 19556f00a022SMaxime Bizon 19566f00a022SMaxime Bizon enetsw_writel(priv, reg, ENETSW_MDIOC_REG); 19576f00a022SMaxime Bizon udelay(50); 19586f00a022SMaxime Bizon ret = enetsw_readw(priv, ENETSW_MDIOD_REG); 19596f00a022SMaxime Bizon spin_unlock_bh(&priv->enetsw_mdio_lock); 19606f00a022SMaxime Bizon return ret; 19616f00a022SMaxime Bizon } 19626f00a022SMaxime Bizon 19636f00a022SMaxime Bizon static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv, 19646f00a022SMaxime Bizon int ext, int phy_id, int location, 19656f00a022SMaxime Bizon uint16_t data) 19666f00a022SMaxime Bizon { 19676f00a022SMaxime Bizon u32 reg; 19686f00a022SMaxime Bizon 19696f00a022SMaxime Bizon spin_lock_bh(&priv->enetsw_mdio_lock); 19706f00a022SMaxime Bizon enetsw_writel(priv, 0, ENETSW_MDIOC_REG); 19716f00a022SMaxime Bizon 19726f00a022SMaxime Bizon reg = ENETSW_MDIOC_WR_MASK | 19736f00a022SMaxime Bizon (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | 19746f00a022SMaxime Bizon (location << ENETSW_MDIOC_REG_SHIFT); 19756f00a022SMaxime Bizon 
	/* external (RGMII) PHYs need the EXT bit set in the MDIO command */
	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	/* no completion flag on this block; a fixed delay covers the
	 * MDIO turnaround time
	 */
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

/* ports numbered from ENETSW_RGMII_PORT0 upward are RGMII ports,
 * i.e. they carry an external PHY
 */
static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 *
 * Periodic (1 Hz) software poll of every used switch port's PHY.  On a
 * link state change it programs the per-port override and TX/RX enable
 * registers to match the negotiated speed/duplex, since the switch has
 * no link interrupt wired up here.
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		/* bypass_link ports were force-configured at open time */
		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		/* all-ones means no PHY is answering at this address */
		if (val == 0xffff)
			continue;

		/* skip ports whose link state did not change */
		up = (val & BMSR_LSTATUS) ? 1 : 0;
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			/* force the port down and disable its TX/RX paths */
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;

		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
			speed = 100;
		else
			speed = 10;

		/* gigabit negotiation lives in the extended 1000BASE-T
		 * registers; only consult them if the PHY advertises
		 * extended status
		 */
		if (val & BMSR_ESTATEN) {
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
			    && lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				/* nonzero (not 1) when full duplex; only
				 * truthiness is used below
				 */
				duplex = (lpa & LPA_1000FULL);
			}
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");

		/* mirror the negotiated result into the port override
		 * register and re-enable the port's TX/RX paths
		 */
		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* re-arm ourselves: poll again in one second */
	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released via the goto-unwind chain at the bottom.
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	/* tx irq is optional: -1 means tx completion is handled from the
	 * rx interrupt path
	 */
	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib
	 * NOTE(review): the mdelay(1) on either side of the pulse looks
	 * like a hardware settling requirement -- confirm against the
	 * SoC datasheet before shortening
	 */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports (9728 presumably being the largest
	 * frame the switch accepts -- TODO confirm)
	 */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt  */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here. */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;
		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			/* 10 Mbps is the register's default: no bit to set */
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;


		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
	mod_timer(&priv->swphy_poll, jiffies);
	return 0;

out:
	/* unwind in reverse order of acquisition; each label also falls
	 * through to the ones below it
	 */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/* stop callback
 *
 * Tears down in the reverse order of bcm_enetsw_open(): stop the PHY
 * poll timer and the queue/NAPI first, then quiesce the DMA engine,
 * then release buffers, rings and IRQs.  Always returns 0.
 */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 *
 * Returns nonzero when the phy should be driven through the external
 * MDIO path, 0 for an internal switch phy.
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	/* printk_once: this can fire on every mdio access, warn only once */
	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
24196f00a022SMaxime Bizon */ 24206f00a022SMaxime Bizon static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id, 24216f00a022SMaxime Bizon int location) 24226f00a022SMaxime Bizon { 24236f00a022SMaxime Bizon struct bcm_enet_priv *priv; 24246f00a022SMaxime Bizon 24256f00a022SMaxime Bizon priv = netdev_priv(dev); 24266f00a022SMaxime Bizon return bcmenet_sw_mdio_read(priv, 24276f00a022SMaxime Bizon bcm_enetsw_phy_is_external(priv, phy_id), 24286f00a022SMaxime Bizon phy_id, location); 24296f00a022SMaxime Bizon } 24306f00a022SMaxime Bizon 24316f00a022SMaxime Bizon /* can't use bcmenet_sw_mdio_write directly as we need to sort out 24326f00a022SMaxime Bizon * external/internal status of the given phy_id first. 24336f00a022SMaxime Bizon */ 24346f00a022SMaxime Bizon static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id, 24356f00a022SMaxime Bizon int location, 24366f00a022SMaxime Bizon int val) 24376f00a022SMaxime Bizon { 24386f00a022SMaxime Bizon struct bcm_enet_priv *priv; 24396f00a022SMaxime Bizon 24406f00a022SMaxime Bizon priv = netdev_priv(dev); 24416f00a022SMaxime Bizon bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id), 24426f00a022SMaxime Bizon phy_id, location, val); 24436f00a022SMaxime Bizon } 24446f00a022SMaxime Bizon 24456f00a022SMaxime Bizon static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 24466f00a022SMaxime Bizon { 24476f00a022SMaxime Bizon struct mii_if_info mii; 24486f00a022SMaxime Bizon 24496f00a022SMaxime Bizon mii.dev = dev; 24506f00a022SMaxime Bizon mii.mdio_read = bcm_enetsw_mii_mdio_read; 24516f00a022SMaxime Bizon mii.mdio_write = bcm_enetsw_mii_mdio_write; 24526f00a022SMaxime Bizon mii.phy_id = 0; 24536f00a022SMaxime Bizon mii.phy_id_mask = 0x3f; 24546f00a022SMaxime Bizon mii.reg_num_mask = 0x1f; 24556f00a022SMaxime Bizon return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); 24566f00a022SMaxime Bizon 24576f00a022SMaxime Bizon } 24586f00a022SMaxime Bizon 
static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open = bcm_enetsw_open,
	.ndo_stop = bcm_enetsw_stop,
	.ndo_start_xmit = bcm_enet_start_xmit,
	.ndo_change_mtu = bcm_enet_change_mtu,
	.ndo_do_ioctl = bcm_enetsw_ioctl,
};


/* ethtool statistics table: name, where the value is stored
 * (netdev stats vs. priv->mib), and the hardware MIB register
 * (-1 for software-only counters).
 *
 * NOTE(review): the tx_* entries read ETHSW_MIB_RX_* registers and the
 * rx_* entries read ETHSW_MIB_TX_* -- presumably because the MIB
 * counters are named from the switch port's point of view, opposite to
 * the host's; confirm against the SoC datasheet.
 * NOTE(review): "tx_unicast" maps to ETHSW_MIB_RX_BRDCAST (same as
 * "tx_broadcast") and "rx_unicast" to ETHSW_MIB_TX_MULT (same as
 * "rx_multicast") -- these look like possible copy/paste mistakes;
 * verify before relying on those counters.
 */
static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped",	GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },

};

/* number of entries in the table above */
#define BCM_ENETSW_STATS_LEN	\
	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))

static void bcm_enetsw_get_strings(struct net_device *netdev,
u32 stringset, u8 *data) 25176f00a022SMaxime Bizon { 25186f00a022SMaxime Bizon int i; 25196f00a022SMaxime Bizon 25206f00a022SMaxime Bizon switch (stringset) { 25216f00a022SMaxime Bizon case ETH_SS_STATS: 25226f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 25236f00a022SMaxime Bizon memcpy(data + i * ETH_GSTRING_LEN, 25246f00a022SMaxime Bizon bcm_enetsw_gstrings_stats[i].stat_string, 25256f00a022SMaxime Bizon ETH_GSTRING_LEN); 25266f00a022SMaxime Bizon } 25276f00a022SMaxime Bizon break; 25286f00a022SMaxime Bizon } 25296f00a022SMaxime Bizon } 25306f00a022SMaxime Bizon 25316f00a022SMaxime Bizon static int bcm_enetsw_get_sset_count(struct net_device *netdev, 25326f00a022SMaxime Bizon int string_set) 25336f00a022SMaxime Bizon { 25346f00a022SMaxime Bizon switch (string_set) { 25356f00a022SMaxime Bizon case ETH_SS_STATS: 25366f00a022SMaxime Bizon return BCM_ENETSW_STATS_LEN; 25376f00a022SMaxime Bizon default: 25386f00a022SMaxime Bizon return -EINVAL; 25396f00a022SMaxime Bizon } 25406f00a022SMaxime Bizon } 25416f00a022SMaxime Bizon 25426f00a022SMaxime Bizon static void bcm_enetsw_get_drvinfo(struct net_device *netdev, 25436f00a022SMaxime Bizon struct ethtool_drvinfo *drvinfo) 25446f00a022SMaxime Bizon { 25456f00a022SMaxime Bizon strncpy(drvinfo->driver, bcm_enet_driver_name, 32); 25466f00a022SMaxime Bizon strncpy(drvinfo->version, bcm_enet_driver_version, 32); 25476f00a022SMaxime Bizon strncpy(drvinfo->fw_version, "N/A", 32); 25486f00a022SMaxime Bizon strncpy(drvinfo->bus_info, "bcm63xx", 32); 25496f00a022SMaxime Bizon } 25506f00a022SMaxime Bizon 25516f00a022SMaxime Bizon static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, 25526f00a022SMaxime Bizon struct ethtool_stats *stats, 25536f00a022SMaxime Bizon u64 *data) 25546f00a022SMaxime Bizon { 25556f00a022SMaxime Bizon struct bcm_enet_priv *priv; 25566f00a022SMaxime Bizon int i; 25576f00a022SMaxime Bizon 25586f00a022SMaxime Bizon priv = netdev_priv(netdev); 25596f00a022SMaxime Bizon 
25606f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 25616f00a022SMaxime Bizon const struct bcm_enet_stats *s; 25626f00a022SMaxime Bizon u32 lo, hi; 25636f00a022SMaxime Bizon char *p; 25646f00a022SMaxime Bizon int reg; 25656f00a022SMaxime Bizon 25666f00a022SMaxime Bizon s = &bcm_enetsw_gstrings_stats[i]; 25676f00a022SMaxime Bizon 25686f00a022SMaxime Bizon reg = s->mib_reg; 25696f00a022SMaxime Bizon if (reg == -1) 25706f00a022SMaxime Bizon continue; 25716f00a022SMaxime Bizon 25726f00a022SMaxime Bizon lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); 25736f00a022SMaxime Bizon p = (char *)priv + s->stat_offset; 25746f00a022SMaxime Bizon 25756f00a022SMaxime Bizon if (s->sizeof_stat == sizeof(u64)) { 25766f00a022SMaxime Bizon hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); 25776f00a022SMaxime Bizon *(u64 *)p = ((u64)hi << 32 | lo); 25786f00a022SMaxime Bizon } else { 25796f00a022SMaxime Bizon *(u32 *)p = lo; 25806f00a022SMaxime Bizon } 25816f00a022SMaxime Bizon } 25826f00a022SMaxime Bizon 25836f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 25846f00a022SMaxime Bizon const struct bcm_enet_stats *s; 25856f00a022SMaxime Bizon char *p; 25866f00a022SMaxime Bizon 25876f00a022SMaxime Bizon s = &bcm_enetsw_gstrings_stats[i]; 25886f00a022SMaxime Bizon 25896f00a022SMaxime Bizon if (s->mib_reg == -1) 25906f00a022SMaxime Bizon p = (char *)&netdev->stats + s->stat_offset; 25916f00a022SMaxime Bizon else 25926f00a022SMaxime Bizon p = (char *)priv + s->stat_offset; 25936f00a022SMaxime Bizon 25946f00a022SMaxime Bizon data[i] = (s->sizeof_stat == sizeof(u64)) ? 
25956f00a022SMaxime Bizon *(u64 *)p : *(u32 *)p; 25966f00a022SMaxime Bizon } 25976f00a022SMaxime Bizon } 25986f00a022SMaxime Bizon 25996f00a022SMaxime Bizon static void bcm_enetsw_get_ringparam(struct net_device *dev, 26006f00a022SMaxime Bizon struct ethtool_ringparam *ering) 26016f00a022SMaxime Bizon { 26026f00a022SMaxime Bizon struct bcm_enet_priv *priv; 26036f00a022SMaxime Bizon 26046f00a022SMaxime Bizon priv = netdev_priv(dev); 26056f00a022SMaxime Bizon 26066f00a022SMaxime Bizon /* rx/tx ring is actually only limited by memory */ 26076f00a022SMaxime Bizon ering->rx_max_pending = 8192; 26086f00a022SMaxime Bizon ering->tx_max_pending = 8192; 26096f00a022SMaxime Bizon ering->rx_mini_max_pending = 0; 26106f00a022SMaxime Bizon ering->rx_jumbo_max_pending = 0; 26116f00a022SMaxime Bizon ering->rx_pending = priv->rx_ring_size; 26126f00a022SMaxime Bizon ering->tx_pending = priv->tx_ring_size; 26136f00a022SMaxime Bizon } 26146f00a022SMaxime Bizon 26156f00a022SMaxime Bizon static int bcm_enetsw_set_ringparam(struct net_device *dev, 26166f00a022SMaxime Bizon struct ethtool_ringparam *ering) 26176f00a022SMaxime Bizon { 26186f00a022SMaxime Bizon struct bcm_enet_priv *priv; 26196f00a022SMaxime Bizon int was_running; 26206f00a022SMaxime Bizon 26216f00a022SMaxime Bizon priv = netdev_priv(dev); 26226f00a022SMaxime Bizon 26236f00a022SMaxime Bizon was_running = 0; 26246f00a022SMaxime Bizon if (netif_running(dev)) { 26256f00a022SMaxime Bizon bcm_enetsw_stop(dev); 26266f00a022SMaxime Bizon was_running = 1; 26276f00a022SMaxime Bizon } 26286f00a022SMaxime Bizon 26296f00a022SMaxime Bizon priv->rx_ring_size = ering->rx_pending; 26306f00a022SMaxime Bizon priv->tx_ring_size = ering->tx_pending; 26316f00a022SMaxime Bizon 26326f00a022SMaxime Bizon if (was_running) { 26336f00a022SMaxime Bizon int err; 26346f00a022SMaxime Bizon 26356f00a022SMaxime Bizon err = bcm_enetsw_open(dev); 26366f00a022SMaxime Bizon if (err) 26376f00a022SMaxime Bizon dev_close(dev); 26386f00a022SMaxime Bizon } 
	/* reports success even if reopening failed (interface is then
	 * closed); this matches the original behavior
	 */
	return 0;
}

static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings = bcm_enetsw_get_strings,
	.get_sset_count = bcm_enetsw_get_sset_count,
	.get_ethtool_stats = bcm_enetsw_get_ethtool_stats,
	.get_drvinfo = bcm_enetsw_get_drvinfo,
	.get_ringparam = bcm_enetsw_get_ringparam,
	.set_ringparam = bcm_enetsw_set_ringparam,
};

/* allocate netdevice, request register memory and register device.
 *
 * Returns 0 on success or a negative errno.  Resources obtained via
 * devm_* are released automatically by the driver core; only the clk
 * enable and the netdev allocation need explicit unwinding here.
 */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	/* the shared DMA register window is mapped by another probe;
	 * defer until it is available
	 */
	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	/* note: a missing tx irq is tolerated (irq_tx may stay negative
	 * and is treated as "no dedicated tx irq" at open time)
	 */
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(*priv));

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}

	/* validates the default MTU and derives rx_skb_size from it */
	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}


/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
27586f00a022SMaxime Bizon unregister_netdev(dev); 27596f00a022SMaxime Bizon 27609c86b846SJonas Gorski clk_disable_unprepare(priv->mac_clk); 27619c86b846SJonas Gorski 27626f00a022SMaxime Bizon free_netdev(dev); 27636f00a022SMaxime Bizon return 0; 27646f00a022SMaxime Bizon } 27656f00a022SMaxime Bizon 27666f00a022SMaxime Bizon struct platform_driver bcm63xx_enetsw_driver = { 27676f00a022SMaxime Bizon .probe = bcm_enetsw_probe, 27686f00a022SMaxime Bizon .remove = bcm_enetsw_remove, 27696f00a022SMaxime Bizon .driver = { 27706f00a022SMaxime Bizon .name = "bcm63xx_enetsw", 27716f00a022SMaxime Bizon .owner = THIS_MODULE, 27726f00a022SMaxime Bizon }, 27736f00a022SMaxime Bizon }; 27746f00a022SMaxime Bizon 27756f00a022SMaxime Bizon /* reserve & remap memory space shared between all macs */ 2776047fc566SBill Pemberton static int bcm_enet_shared_probe(struct platform_device *pdev) 2777adfc5217SJeff Kirsher { 2778adfc5217SJeff Kirsher struct resource *res; 27790ae99b5fSMaxime Bizon void __iomem *p[3]; 27800ae99b5fSMaxime Bizon unsigned int i; 2781adfc5217SJeff Kirsher 27820ae99b5fSMaxime Bizon memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); 2783adfc5217SJeff Kirsher 27840ae99b5fSMaxime Bizon for (i = 0; i < 3; i++) { 27850ae99b5fSMaxime Bizon res = platform_get_resource(pdev, IORESOURCE_MEM, i); 27860ae99b5fSMaxime Bizon p[i] = devm_ioremap_resource(&pdev->dev, res); 2787646093a2SWei Yongjun if (IS_ERR(p[i])) 2788646093a2SWei Yongjun return PTR_ERR(p[i]); 27890ae99b5fSMaxime Bizon } 27900ae99b5fSMaxime Bizon 27910ae99b5fSMaxime Bizon memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); 27921c03da05SJonas Gorski 2793adfc5217SJeff Kirsher return 0; 2794adfc5217SJeff Kirsher } 2795adfc5217SJeff Kirsher 2796047fc566SBill Pemberton static int bcm_enet_shared_remove(struct platform_device *pdev) 2797adfc5217SJeff Kirsher { 2798adfc5217SJeff Kirsher return 0; 2799adfc5217SJeff Kirsher } 2800adfc5217SJeff Kirsher 28016f00a022SMaxime Bizon /* this "shared" driver 
is needed because both macs share a single 2802adfc5217SJeff Kirsher * address space 2803adfc5217SJeff Kirsher */ 2804adfc5217SJeff Kirsher struct platform_driver bcm63xx_enet_shared_driver = { 2805adfc5217SJeff Kirsher .probe = bcm_enet_shared_probe, 2806047fc566SBill Pemberton .remove = bcm_enet_shared_remove, 2807adfc5217SJeff Kirsher .driver = { 2808adfc5217SJeff Kirsher .name = "bcm63xx_enet_shared", 2809adfc5217SJeff Kirsher .owner = THIS_MODULE, 2810adfc5217SJeff Kirsher }, 2811adfc5217SJeff Kirsher }; 2812adfc5217SJeff Kirsher 28130d1c744cSThierry Reding static struct platform_driver * const drivers[] = { 28140d1c744cSThierry Reding &bcm63xx_enet_shared_driver, 28150d1c744cSThierry Reding &bcm63xx_enet_driver, 28160d1c744cSThierry Reding &bcm63xx_enetsw_driver, 28170d1c744cSThierry Reding }; 28180d1c744cSThierry Reding 28196f00a022SMaxime Bizon /* entry point */ 2820adfc5217SJeff Kirsher static int __init bcm_enet_init(void) 2821adfc5217SJeff Kirsher { 28220d1c744cSThierry Reding return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); 2823adfc5217SJeff Kirsher } 2824adfc5217SJeff Kirsher 2825adfc5217SJeff Kirsher static void __exit bcm_enet_exit(void) 2826adfc5217SJeff Kirsher { 28270d1c744cSThierry Reding platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); 2828adfc5217SJeff Kirsher } 2829adfc5217SJeff Kirsher 2830adfc5217SJeff Kirsher 2831adfc5217SJeff Kirsher module_init(bcm_enet_init); 2832adfc5217SJeff Kirsher module_exit(bcm_enet_exit); 2833adfc5217SJeff Kirsher 2834adfc5217SJeff Kirsher MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); 2835adfc5217SJeff Kirsher MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); 2836adfc5217SJeff Kirsher MODULE_LICENSE("GPL"); 2837