/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19adfc5217SJeff Kirsher */ 20adfc5217SJeff Kirsher #include <linux/init.h> 21adfc5217SJeff Kirsher #include <linux/interrupt.h> 22adfc5217SJeff Kirsher #include <linux/module.h> 23adfc5217SJeff Kirsher #include <linux/clk.h> 24adfc5217SJeff Kirsher #include <linux/etherdevice.h> 25adfc5217SJeff Kirsher #include <linux/slab.h> 26adfc5217SJeff Kirsher #include <linux/delay.h> 27adfc5217SJeff Kirsher #include <linux/ethtool.h> 28adfc5217SJeff Kirsher #include <linux/crc32.h> 29adfc5217SJeff Kirsher #include <linux/err.h> 30adfc5217SJeff Kirsher #include <linux/dma-mapping.h> 31adfc5217SJeff Kirsher #include <linux/platform_device.h> 32adfc5217SJeff Kirsher #include <linux/if_vlan.h> 33adfc5217SJeff Kirsher 34adfc5217SJeff Kirsher #include <bcm63xx_dev_enet.h> 35adfc5217SJeff Kirsher #include "bcm63xx_enet.h" 36adfc5217SJeff Kirsher 37adfc5217SJeff Kirsher static char bcm_enet_driver_name[] = "bcm63xx_enet"; 38adfc5217SJeff Kirsher static char bcm_enet_driver_version[] = "1.0"; 39adfc5217SJeff Kirsher 40adfc5217SJeff Kirsher static int copybreak __read_mostly = 128; 41adfc5217SJeff Kirsher module_param(copybreak, int, 0); 42adfc5217SJeff Kirsher MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 43adfc5217SJeff Kirsher 440ae99b5fSMaxime Bizon /* io registers memory shared between all devices */ 450ae99b5fSMaxime Bizon static void __iomem *bcm_enet_shared_base[3]; 46adfc5217SJeff Kirsher 47adfc5217SJeff Kirsher /* 48adfc5217SJeff Kirsher * io helpers to access mac registers 49adfc5217SJeff Kirsher */ 50adfc5217SJeff Kirsher static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) 51adfc5217SJeff Kirsher { 52adfc5217SJeff Kirsher return bcm_readl(priv->base + off); 53adfc5217SJeff Kirsher } 54adfc5217SJeff Kirsher 55adfc5217SJeff Kirsher static inline void enet_writel(struct bcm_enet_priv *priv, 56adfc5217SJeff Kirsher u32 val, u32 off) 57adfc5217SJeff Kirsher { 58adfc5217SJeff Kirsher bcm_writel(val, priv->base + off); 59adfc5217SJeff Kirsher } 
60adfc5217SJeff Kirsher 61adfc5217SJeff Kirsher /* 626f00a022SMaxime Bizon * io helpers to access switch registers 63adfc5217SJeff Kirsher */ 646f00a022SMaxime Bizon static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off) 656f00a022SMaxime Bizon { 666f00a022SMaxime Bizon return bcm_readl(priv->base + off); 676f00a022SMaxime Bizon } 686f00a022SMaxime Bizon 696f00a022SMaxime Bizon static inline void enetsw_writel(struct bcm_enet_priv *priv, 706f00a022SMaxime Bizon u32 val, u32 off) 716f00a022SMaxime Bizon { 726f00a022SMaxime Bizon bcm_writel(val, priv->base + off); 736f00a022SMaxime Bizon } 746f00a022SMaxime Bizon 756f00a022SMaxime Bizon static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off) 766f00a022SMaxime Bizon { 776f00a022SMaxime Bizon return bcm_readw(priv->base + off); 786f00a022SMaxime Bizon } 796f00a022SMaxime Bizon 806f00a022SMaxime Bizon static inline void enetsw_writew(struct bcm_enet_priv *priv, 816f00a022SMaxime Bizon u16 val, u32 off) 826f00a022SMaxime Bizon { 836f00a022SMaxime Bizon bcm_writew(val, priv->base + off); 846f00a022SMaxime Bizon } 856f00a022SMaxime Bizon 866f00a022SMaxime Bizon static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off) 876f00a022SMaxime Bizon { 886f00a022SMaxime Bizon return bcm_readb(priv->base + off); 896f00a022SMaxime Bizon } 906f00a022SMaxime Bizon 916f00a022SMaxime Bizon static inline void enetsw_writeb(struct bcm_enet_priv *priv, 926f00a022SMaxime Bizon u8 val, u32 off) 936f00a022SMaxime Bizon { 946f00a022SMaxime Bizon bcm_writeb(val, priv->base + off); 956f00a022SMaxime Bizon } 966f00a022SMaxime Bizon 976f00a022SMaxime Bizon 986f00a022SMaxime Bizon /* io helpers to access shared registers */ 99adfc5217SJeff Kirsher static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) 100adfc5217SJeff Kirsher { 1010ae99b5fSMaxime Bizon return bcm_readl(bcm_enet_shared_base[0] + off); 102adfc5217SJeff Kirsher } 103adfc5217SJeff Kirsher 104adfc5217SJeff Kirsher static inline void 
enet_dma_writel(struct bcm_enet_priv *priv, 105adfc5217SJeff Kirsher u32 val, u32 off) 106adfc5217SJeff Kirsher { 1070ae99b5fSMaxime Bizon bcm_writel(val, bcm_enet_shared_base[0] + off); 1080ae99b5fSMaxime Bizon } 1090ae99b5fSMaxime Bizon 1103dc6475cSFlorian Fainelli static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan) 1110ae99b5fSMaxime Bizon { 1123dc6475cSFlorian Fainelli return bcm_readl(bcm_enet_shared_base[1] + 1133dc6475cSFlorian Fainelli bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); 1140ae99b5fSMaxime Bizon } 1150ae99b5fSMaxime Bizon 1160ae99b5fSMaxime Bizon static inline void enet_dmac_writel(struct bcm_enet_priv *priv, 1173dc6475cSFlorian Fainelli u32 val, u32 off, int chan) 1180ae99b5fSMaxime Bizon { 1193dc6475cSFlorian Fainelli bcm_writel(val, bcm_enet_shared_base[1] + 1203dc6475cSFlorian Fainelli bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); 1210ae99b5fSMaxime Bizon } 1220ae99b5fSMaxime Bizon 1233dc6475cSFlorian Fainelli static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan) 1240ae99b5fSMaxime Bizon { 1253dc6475cSFlorian Fainelli return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); 1260ae99b5fSMaxime Bizon } 1270ae99b5fSMaxime Bizon 1280ae99b5fSMaxime Bizon static inline void enet_dmas_writel(struct bcm_enet_priv *priv, 1293dc6475cSFlorian Fainelli u32 val, u32 off, int chan) 1300ae99b5fSMaxime Bizon { 1313dc6475cSFlorian Fainelli bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); 132adfc5217SJeff Kirsher } 133adfc5217SJeff Kirsher 134adfc5217SJeff Kirsher /* 135adfc5217SJeff Kirsher * write given data into mii register and wait for transfer to end 136adfc5217SJeff Kirsher * with timeout (average measured transfer time is 25us) 137adfc5217SJeff Kirsher */ 138adfc5217SJeff Kirsher static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) 139adfc5217SJeff Kirsher { 140adfc5217SJeff Kirsher int limit; 141adfc5217SJeff 
Kirsher 142adfc5217SJeff Kirsher /* make sure mii interrupt status is cleared */ 143adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MII, ENET_IR_REG); 144adfc5217SJeff Kirsher 145adfc5217SJeff Kirsher enet_writel(priv, data, ENET_MIIDATA_REG); 146adfc5217SJeff Kirsher wmb(); 147adfc5217SJeff Kirsher 148adfc5217SJeff Kirsher /* busy wait on mii interrupt bit, with timeout */ 149adfc5217SJeff Kirsher limit = 1000; 150adfc5217SJeff Kirsher do { 151adfc5217SJeff Kirsher if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) 152adfc5217SJeff Kirsher break; 153adfc5217SJeff Kirsher udelay(1); 154adfc5217SJeff Kirsher } while (limit-- > 0); 155adfc5217SJeff Kirsher 156adfc5217SJeff Kirsher return (limit < 0) ? 1 : 0; 157adfc5217SJeff Kirsher } 158adfc5217SJeff Kirsher 159adfc5217SJeff Kirsher /* 160adfc5217SJeff Kirsher * MII internal read callback 161adfc5217SJeff Kirsher */ 162adfc5217SJeff Kirsher static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, 163adfc5217SJeff Kirsher int regnum) 164adfc5217SJeff Kirsher { 165adfc5217SJeff Kirsher u32 tmp, val; 166adfc5217SJeff Kirsher 167adfc5217SJeff Kirsher tmp = regnum << ENET_MIIDATA_REG_SHIFT; 168adfc5217SJeff Kirsher tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; 169adfc5217SJeff Kirsher tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; 170adfc5217SJeff Kirsher tmp |= ENET_MIIDATA_OP_READ_MASK; 171adfc5217SJeff Kirsher 172adfc5217SJeff Kirsher if (do_mdio_op(priv, tmp)) 173adfc5217SJeff Kirsher return -1; 174adfc5217SJeff Kirsher 175adfc5217SJeff Kirsher val = enet_readl(priv, ENET_MIIDATA_REG); 176adfc5217SJeff Kirsher val &= 0xffff; 177adfc5217SJeff Kirsher return val; 178adfc5217SJeff Kirsher } 179adfc5217SJeff Kirsher 180adfc5217SJeff Kirsher /* 181adfc5217SJeff Kirsher * MII internal write callback 182adfc5217SJeff Kirsher */ 183adfc5217SJeff Kirsher static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, 184adfc5217SJeff Kirsher int regnum, u16 value) 185adfc5217SJeff Kirsher { 186adfc5217SJeff Kirsher u32 
tmp; 187adfc5217SJeff Kirsher 188adfc5217SJeff Kirsher tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; 189adfc5217SJeff Kirsher tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; 190adfc5217SJeff Kirsher tmp |= regnum << ENET_MIIDATA_REG_SHIFT; 191adfc5217SJeff Kirsher tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; 192adfc5217SJeff Kirsher tmp |= ENET_MIIDATA_OP_WRITE_MASK; 193adfc5217SJeff Kirsher 194adfc5217SJeff Kirsher (void)do_mdio_op(priv, tmp); 195adfc5217SJeff Kirsher return 0; 196adfc5217SJeff Kirsher } 197adfc5217SJeff Kirsher 198adfc5217SJeff Kirsher /* 199adfc5217SJeff Kirsher * MII read callback from phylib 200adfc5217SJeff Kirsher */ 201adfc5217SJeff Kirsher static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, 202adfc5217SJeff Kirsher int regnum) 203adfc5217SJeff Kirsher { 204adfc5217SJeff Kirsher return bcm_enet_mdio_read(bus->priv, mii_id, regnum); 205adfc5217SJeff Kirsher } 206adfc5217SJeff Kirsher 207adfc5217SJeff Kirsher /* 208adfc5217SJeff Kirsher * MII write callback from phylib 209adfc5217SJeff Kirsher */ 210adfc5217SJeff Kirsher static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, 211adfc5217SJeff Kirsher int regnum, u16 value) 212adfc5217SJeff Kirsher { 213adfc5217SJeff Kirsher return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); 214adfc5217SJeff Kirsher } 215adfc5217SJeff Kirsher 216adfc5217SJeff Kirsher /* 217adfc5217SJeff Kirsher * MII read callback from mii core 218adfc5217SJeff Kirsher */ 219adfc5217SJeff Kirsher static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, 220adfc5217SJeff Kirsher int regnum) 221adfc5217SJeff Kirsher { 222adfc5217SJeff Kirsher return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); 223adfc5217SJeff Kirsher } 224adfc5217SJeff Kirsher 225adfc5217SJeff Kirsher /* 226adfc5217SJeff Kirsher * MII write callback from mii core 227adfc5217SJeff Kirsher */ 228adfc5217SJeff Kirsher static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, 229adfc5217SJeff 
Kirsher int regnum, int value) 230adfc5217SJeff Kirsher { 231adfc5217SJeff Kirsher bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); 232adfc5217SJeff Kirsher } 233adfc5217SJeff Kirsher 234adfc5217SJeff Kirsher /* 235adfc5217SJeff Kirsher * refill rx queue 236adfc5217SJeff Kirsher */ 237adfc5217SJeff Kirsher static int bcm_enet_refill_rx(struct net_device *dev) 238adfc5217SJeff Kirsher { 239adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 240adfc5217SJeff Kirsher 241adfc5217SJeff Kirsher priv = netdev_priv(dev); 242adfc5217SJeff Kirsher 243adfc5217SJeff Kirsher while (priv->rx_desc_count < priv->rx_ring_size) { 244adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 245adfc5217SJeff Kirsher struct sk_buff *skb; 246adfc5217SJeff Kirsher dma_addr_t p; 247adfc5217SJeff Kirsher int desc_idx; 248adfc5217SJeff Kirsher u32 len_stat; 249adfc5217SJeff Kirsher 250adfc5217SJeff Kirsher desc_idx = priv->rx_dirty_desc; 251adfc5217SJeff Kirsher desc = &priv->rx_desc_cpu[desc_idx]; 252adfc5217SJeff Kirsher 253adfc5217SJeff Kirsher if (!priv->rx_skb[desc_idx]) { 254adfc5217SJeff Kirsher skb = netdev_alloc_skb(dev, priv->rx_skb_size); 255adfc5217SJeff Kirsher if (!skb) 256adfc5217SJeff Kirsher break; 257adfc5217SJeff Kirsher priv->rx_skb[desc_idx] = skb; 258adfc5217SJeff Kirsher p = dma_map_single(&priv->pdev->dev, skb->data, 259adfc5217SJeff Kirsher priv->rx_skb_size, 260adfc5217SJeff Kirsher DMA_FROM_DEVICE); 261adfc5217SJeff Kirsher desc->address = p; 262adfc5217SJeff Kirsher } 263adfc5217SJeff Kirsher 264adfc5217SJeff Kirsher len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; 265adfc5217SJeff Kirsher len_stat |= DMADESC_OWNER_MASK; 266adfc5217SJeff Kirsher if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { 2673dc6475cSFlorian Fainelli len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); 268adfc5217SJeff Kirsher priv->rx_dirty_desc = 0; 269adfc5217SJeff Kirsher } else { 270adfc5217SJeff Kirsher priv->rx_dirty_desc++; 271adfc5217SJeff Kirsher } 272adfc5217SJeff 
Kirsher wmb(); 273adfc5217SJeff Kirsher desc->len_stat = len_stat; 274adfc5217SJeff Kirsher 275adfc5217SJeff Kirsher priv->rx_desc_count++; 276adfc5217SJeff Kirsher 277adfc5217SJeff Kirsher /* tell dma engine we allocated one buffer */ 2783dc6475cSFlorian Fainelli if (priv->dma_has_sram) 279adfc5217SJeff Kirsher enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); 2803dc6475cSFlorian Fainelli else 2813dc6475cSFlorian Fainelli enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); 282adfc5217SJeff Kirsher } 283adfc5217SJeff Kirsher 284adfc5217SJeff Kirsher /* If rx ring is still empty, set a timer to try allocating 285adfc5217SJeff Kirsher * again at a later time. */ 286adfc5217SJeff Kirsher if (priv->rx_desc_count == 0 && netif_running(dev)) { 287adfc5217SJeff Kirsher dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); 288adfc5217SJeff Kirsher priv->rx_timeout.expires = jiffies + HZ; 289adfc5217SJeff Kirsher add_timer(&priv->rx_timeout); 290adfc5217SJeff Kirsher } 291adfc5217SJeff Kirsher 292adfc5217SJeff Kirsher return 0; 293adfc5217SJeff Kirsher } 294adfc5217SJeff Kirsher 295adfc5217SJeff Kirsher /* 296adfc5217SJeff Kirsher * timer callback to defer refill rx queue in case we're OOM 297adfc5217SJeff Kirsher */ 298adfc5217SJeff Kirsher static void bcm_enet_refill_rx_timer(unsigned long data) 299adfc5217SJeff Kirsher { 300adfc5217SJeff Kirsher struct net_device *dev; 301adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 302adfc5217SJeff Kirsher 303adfc5217SJeff Kirsher dev = (struct net_device *)data; 304adfc5217SJeff Kirsher priv = netdev_priv(dev); 305adfc5217SJeff Kirsher 306adfc5217SJeff Kirsher spin_lock(&priv->rx_lock); 307adfc5217SJeff Kirsher bcm_enet_refill_rx((struct net_device *)data); 308adfc5217SJeff Kirsher spin_unlock(&priv->rx_lock); 309adfc5217SJeff Kirsher } 310adfc5217SJeff Kirsher 311adfc5217SJeff Kirsher /* 312adfc5217SJeff Kirsher * extract packet from rx queue 313adfc5217SJeff Kirsher */ 314adfc5217SJeff Kirsher static 
int bcm_enet_receive_queue(struct net_device *dev, int budget) 315adfc5217SJeff Kirsher { 316adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 317adfc5217SJeff Kirsher struct device *kdev; 318adfc5217SJeff Kirsher int processed; 319adfc5217SJeff Kirsher 320adfc5217SJeff Kirsher priv = netdev_priv(dev); 321adfc5217SJeff Kirsher kdev = &priv->pdev->dev; 322adfc5217SJeff Kirsher processed = 0; 323adfc5217SJeff Kirsher 324adfc5217SJeff Kirsher /* don't scan ring further than number of refilled 325adfc5217SJeff Kirsher * descriptor */ 326adfc5217SJeff Kirsher if (budget > priv->rx_desc_count) 327adfc5217SJeff Kirsher budget = priv->rx_desc_count; 328adfc5217SJeff Kirsher 329adfc5217SJeff Kirsher do { 330adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 331adfc5217SJeff Kirsher struct sk_buff *skb; 332adfc5217SJeff Kirsher int desc_idx; 333adfc5217SJeff Kirsher u32 len_stat; 334adfc5217SJeff Kirsher unsigned int len; 335adfc5217SJeff Kirsher 336adfc5217SJeff Kirsher desc_idx = priv->rx_curr_desc; 337adfc5217SJeff Kirsher desc = &priv->rx_desc_cpu[desc_idx]; 338adfc5217SJeff Kirsher 339adfc5217SJeff Kirsher /* make sure we actually read the descriptor status at 340adfc5217SJeff Kirsher * each loop */ 341adfc5217SJeff Kirsher rmb(); 342adfc5217SJeff Kirsher 343adfc5217SJeff Kirsher len_stat = desc->len_stat; 344adfc5217SJeff Kirsher 345adfc5217SJeff Kirsher /* break if dma ownership belongs to hw */ 346adfc5217SJeff Kirsher if (len_stat & DMADESC_OWNER_MASK) 347adfc5217SJeff Kirsher break; 348adfc5217SJeff Kirsher 349adfc5217SJeff Kirsher processed++; 350adfc5217SJeff Kirsher priv->rx_curr_desc++; 351adfc5217SJeff Kirsher if (priv->rx_curr_desc == priv->rx_ring_size) 352adfc5217SJeff Kirsher priv->rx_curr_desc = 0; 353adfc5217SJeff Kirsher priv->rx_desc_count--; 354adfc5217SJeff Kirsher 355adfc5217SJeff Kirsher /* if the packet does not have start of packet _and_ 356adfc5217SJeff Kirsher * end of packet flag set, then just recycle it */ 3573dc6475cSFlorian Fainelli if 
((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != 3583dc6475cSFlorian Fainelli (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) { 359adfc5217SJeff Kirsher dev->stats.rx_dropped++; 360adfc5217SJeff Kirsher continue; 361adfc5217SJeff Kirsher } 362adfc5217SJeff Kirsher 363adfc5217SJeff Kirsher /* recycle packet if it's marked as bad */ 3646f00a022SMaxime Bizon if (!priv->enet_is_sw && 3656f00a022SMaxime Bizon unlikely(len_stat & DMADESC_ERR_MASK)) { 366adfc5217SJeff Kirsher dev->stats.rx_errors++; 367adfc5217SJeff Kirsher 368adfc5217SJeff Kirsher if (len_stat & DMADESC_OVSIZE_MASK) 369adfc5217SJeff Kirsher dev->stats.rx_length_errors++; 370adfc5217SJeff Kirsher if (len_stat & DMADESC_CRC_MASK) 371adfc5217SJeff Kirsher dev->stats.rx_crc_errors++; 372adfc5217SJeff Kirsher if (len_stat & DMADESC_UNDER_MASK) 373adfc5217SJeff Kirsher dev->stats.rx_frame_errors++; 374adfc5217SJeff Kirsher if (len_stat & DMADESC_OV_MASK) 375adfc5217SJeff Kirsher dev->stats.rx_fifo_errors++; 376adfc5217SJeff Kirsher continue; 377adfc5217SJeff Kirsher } 378adfc5217SJeff Kirsher 379adfc5217SJeff Kirsher /* valid packet */ 380adfc5217SJeff Kirsher skb = priv->rx_skb[desc_idx]; 381adfc5217SJeff Kirsher len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; 382adfc5217SJeff Kirsher /* don't include FCS */ 383adfc5217SJeff Kirsher len -= 4; 384adfc5217SJeff Kirsher 385adfc5217SJeff Kirsher if (len < copybreak) { 386adfc5217SJeff Kirsher struct sk_buff *nskb; 387adfc5217SJeff Kirsher 38845abfb10SAlexander Duyck nskb = napi_alloc_skb(&priv->napi, len); 389adfc5217SJeff Kirsher if (!nskb) { 390adfc5217SJeff Kirsher /* forget packet, just rearm desc */ 391adfc5217SJeff Kirsher dev->stats.rx_dropped++; 392adfc5217SJeff Kirsher continue; 393adfc5217SJeff Kirsher } 394adfc5217SJeff Kirsher 395adfc5217SJeff Kirsher dma_sync_single_for_cpu(kdev, desc->address, 396adfc5217SJeff Kirsher len, DMA_FROM_DEVICE); 397adfc5217SJeff Kirsher memcpy(nskb->data, skb->data, len); 398adfc5217SJeff Kirsher 
dma_sync_single_for_device(kdev, desc->address, 399adfc5217SJeff Kirsher len, DMA_FROM_DEVICE); 400adfc5217SJeff Kirsher skb = nskb; 401adfc5217SJeff Kirsher } else { 402adfc5217SJeff Kirsher dma_unmap_single(&priv->pdev->dev, desc->address, 403adfc5217SJeff Kirsher priv->rx_skb_size, DMA_FROM_DEVICE); 404adfc5217SJeff Kirsher priv->rx_skb[desc_idx] = NULL; 405adfc5217SJeff Kirsher } 406adfc5217SJeff Kirsher 407adfc5217SJeff Kirsher skb_put(skb, len); 408adfc5217SJeff Kirsher skb->protocol = eth_type_trans(skb, dev); 409adfc5217SJeff Kirsher dev->stats.rx_packets++; 410adfc5217SJeff Kirsher dev->stats.rx_bytes += len; 411adfc5217SJeff Kirsher netif_receive_skb(skb); 412adfc5217SJeff Kirsher 413adfc5217SJeff Kirsher } while (--budget > 0); 414adfc5217SJeff Kirsher 415adfc5217SJeff Kirsher if (processed || !priv->rx_desc_count) { 416adfc5217SJeff Kirsher bcm_enet_refill_rx(dev); 417adfc5217SJeff Kirsher 418adfc5217SJeff Kirsher /* kick rx dma */ 4193dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_en_mask, 4203dc6475cSFlorian Fainelli ENETDMAC_CHANCFG, priv->rx_chan); 421adfc5217SJeff Kirsher } 422adfc5217SJeff Kirsher 423adfc5217SJeff Kirsher return processed; 424adfc5217SJeff Kirsher } 425adfc5217SJeff Kirsher 426adfc5217SJeff Kirsher 427adfc5217SJeff Kirsher /* 428adfc5217SJeff Kirsher * try to or force reclaim of transmitted buffers 429adfc5217SJeff Kirsher */ 430adfc5217SJeff Kirsher static int bcm_enet_tx_reclaim(struct net_device *dev, int force) 431adfc5217SJeff Kirsher { 432adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 433adfc5217SJeff Kirsher int released; 434adfc5217SJeff Kirsher 435adfc5217SJeff Kirsher priv = netdev_priv(dev); 436adfc5217SJeff Kirsher released = 0; 437adfc5217SJeff Kirsher 438adfc5217SJeff Kirsher while (priv->tx_desc_count < priv->tx_ring_size) { 439adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 440adfc5217SJeff Kirsher struct sk_buff *skb; 441adfc5217SJeff Kirsher 442adfc5217SJeff Kirsher /* We run in a bh and 
fight against start_xmit, which 443adfc5217SJeff Kirsher * is called with bh disabled */ 444adfc5217SJeff Kirsher spin_lock(&priv->tx_lock); 445adfc5217SJeff Kirsher 446adfc5217SJeff Kirsher desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; 447adfc5217SJeff Kirsher 448adfc5217SJeff Kirsher if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { 449adfc5217SJeff Kirsher spin_unlock(&priv->tx_lock); 450adfc5217SJeff Kirsher break; 451adfc5217SJeff Kirsher } 452adfc5217SJeff Kirsher 453adfc5217SJeff Kirsher /* ensure other field of the descriptor were not read 454adfc5217SJeff Kirsher * before we checked ownership */ 455adfc5217SJeff Kirsher rmb(); 456adfc5217SJeff Kirsher 457adfc5217SJeff Kirsher skb = priv->tx_skb[priv->tx_dirty_desc]; 458adfc5217SJeff Kirsher priv->tx_skb[priv->tx_dirty_desc] = NULL; 459adfc5217SJeff Kirsher dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, 460adfc5217SJeff Kirsher DMA_TO_DEVICE); 461adfc5217SJeff Kirsher 462adfc5217SJeff Kirsher priv->tx_dirty_desc++; 463adfc5217SJeff Kirsher if (priv->tx_dirty_desc == priv->tx_ring_size) 464adfc5217SJeff Kirsher priv->tx_dirty_desc = 0; 465adfc5217SJeff Kirsher priv->tx_desc_count++; 466adfc5217SJeff Kirsher 467adfc5217SJeff Kirsher spin_unlock(&priv->tx_lock); 468adfc5217SJeff Kirsher 469adfc5217SJeff Kirsher if (desc->len_stat & DMADESC_UNDER_MASK) 470adfc5217SJeff Kirsher dev->stats.tx_errors++; 471adfc5217SJeff Kirsher 472adfc5217SJeff Kirsher dev_kfree_skb(skb); 473adfc5217SJeff Kirsher released++; 474adfc5217SJeff Kirsher } 475adfc5217SJeff Kirsher 476adfc5217SJeff Kirsher if (netif_queue_stopped(dev) && released) 477adfc5217SJeff Kirsher netif_wake_queue(dev); 478adfc5217SJeff Kirsher 479adfc5217SJeff Kirsher return released; 480adfc5217SJeff Kirsher } 481adfc5217SJeff Kirsher 482adfc5217SJeff Kirsher /* 483adfc5217SJeff Kirsher * poll func, called by network core 484adfc5217SJeff Kirsher */ 485adfc5217SJeff Kirsher static int bcm_enet_poll(struct napi_struct *napi, int budget) 
486adfc5217SJeff Kirsher { 487adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 488adfc5217SJeff Kirsher struct net_device *dev; 489cd33ccf5SNicolas Schichan int rx_work_done; 490adfc5217SJeff Kirsher 491adfc5217SJeff Kirsher priv = container_of(napi, struct bcm_enet_priv, napi); 492adfc5217SJeff Kirsher dev = priv->net_dev; 493adfc5217SJeff Kirsher 494adfc5217SJeff Kirsher /* ack interrupts */ 4953dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 4963dc6475cSFlorian Fainelli ENETDMAC_IR, priv->rx_chan); 4973dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 4983dc6475cSFlorian Fainelli ENETDMAC_IR, priv->tx_chan); 499adfc5217SJeff Kirsher 500adfc5217SJeff Kirsher /* reclaim sent skb */ 501cd33ccf5SNicolas Schichan bcm_enet_tx_reclaim(dev, 0); 502adfc5217SJeff Kirsher 503adfc5217SJeff Kirsher spin_lock(&priv->rx_lock); 504adfc5217SJeff Kirsher rx_work_done = bcm_enet_receive_queue(dev, budget); 505adfc5217SJeff Kirsher spin_unlock(&priv->rx_lock); 506adfc5217SJeff Kirsher 507cd33ccf5SNicolas Schichan if (rx_work_done >= budget) { 508cd33ccf5SNicolas Schichan /* rx queue is not yet empty/clean */ 509adfc5217SJeff Kirsher return rx_work_done; 510adfc5217SJeff Kirsher } 511adfc5217SJeff Kirsher 512adfc5217SJeff Kirsher /* no more packet in rx/tx queue, remove device from poll 513adfc5217SJeff Kirsher * queue */ 514adfc5217SJeff Kirsher napi_complete(napi); 515adfc5217SJeff Kirsher 516adfc5217SJeff Kirsher /* restore rx/tx interrupt */ 5173dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 5183dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->rx_chan); 5193dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 5203dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->tx_chan); 521adfc5217SJeff Kirsher 522adfc5217SJeff Kirsher return rx_work_done; 523adfc5217SJeff Kirsher } 524adfc5217SJeff Kirsher 525adfc5217SJeff Kirsher /* 526adfc5217SJeff Kirsher * mac interrupt handler 527adfc5217SJeff 
Kirsher */ 528adfc5217SJeff Kirsher static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) 529adfc5217SJeff Kirsher { 530adfc5217SJeff Kirsher struct net_device *dev; 531adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 532adfc5217SJeff Kirsher u32 stat; 533adfc5217SJeff Kirsher 534adfc5217SJeff Kirsher dev = dev_id; 535adfc5217SJeff Kirsher priv = netdev_priv(dev); 536adfc5217SJeff Kirsher 537adfc5217SJeff Kirsher stat = enet_readl(priv, ENET_IR_REG); 538adfc5217SJeff Kirsher if (!(stat & ENET_IR_MIB)) 539adfc5217SJeff Kirsher return IRQ_NONE; 540adfc5217SJeff Kirsher 541adfc5217SJeff Kirsher /* clear & mask interrupt */ 542adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 543adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_IRMASK_REG); 544adfc5217SJeff Kirsher 545adfc5217SJeff Kirsher /* read mib registers in workqueue */ 546adfc5217SJeff Kirsher schedule_work(&priv->mib_update_task); 547adfc5217SJeff Kirsher 548adfc5217SJeff Kirsher return IRQ_HANDLED; 549adfc5217SJeff Kirsher } 550adfc5217SJeff Kirsher 551adfc5217SJeff Kirsher /* 552adfc5217SJeff Kirsher * rx/tx dma interrupt handler 553adfc5217SJeff Kirsher */ 554adfc5217SJeff Kirsher static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) 555adfc5217SJeff Kirsher { 556adfc5217SJeff Kirsher struct net_device *dev; 557adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 558adfc5217SJeff Kirsher 559adfc5217SJeff Kirsher dev = dev_id; 560adfc5217SJeff Kirsher priv = netdev_priv(dev); 561adfc5217SJeff Kirsher 562adfc5217SJeff Kirsher /* mask rx/tx interrupts */ 5633dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 5643dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 565adfc5217SJeff Kirsher 566adfc5217SJeff Kirsher napi_schedule(&priv->napi); 567adfc5217SJeff Kirsher 568adfc5217SJeff Kirsher return IRQ_HANDLED; 569adfc5217SJeff Kirsher } 570adfc5217SJeff Kirsher 571adfc5217SJeff Kirsher /* 572adfc5217SJeff Kirsher * tx request 
callback 573adfc5217SJeff Kirsher */ 574adfc5217SJeff Kirsher static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 575adfc5217SJeff Kirsher { 576adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 577adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 578adfc5217SJeff Kirsher u32 len_stat; 579adfc5217SJeff Kirsher int ret; 580adfc5217SJeff Kirsher 581adfc5217SJeff Kirsher priv = netdev_priv(dev); 582adfc5217SJeff Kirsher 583adfc5217SJeff Kirsher /* lock against tx reclaim */ 584adfc5217SJeff Kirsher spin_lock(&priv->tx_lock); 585adfc5217SJeff Kirsher 586adfc5217SJeff Kirsher /* make sure the tx hw queue is not full, should not happen 587adfc5217SJeff Kirsher * since we stop queue before it's the case */ 588adfc5217SJeff Kirsher if (unlikely(!priv->tx_desc_count)) { 589adfc5217SJeff Kirsher netif_stop_queue(dev); 590adfc5217SJeff Kirsher dev_err(&priv->pdev->dev, "xmit called with no tx desc " 591adfc5217SJeff Kirsher "available?\n"); 592adfc5217SJeff Kirsher ret = NETDEV_TX_BUSY; 593adfc5217SJeff Kirsher goto out_unlock; 594adfc5217SJeff Kirsher } 595adfc5217SJeff Kirsher 5966f00a022SMaxime Bizon /* pad small packets sent on a switch device */ 5976f00a022SMaxime Bizon if (priv->enet_is_sw && skb->len < 64) { 5986f00a022SMaxime Bizon int needed = 64 - skb->len; 5996f00a022SMaxime Bizon char *data; 6006f00a022SMaxime Bizon 6016f00a022SMaxime Bizon if (unlikely(skb_tailroom(skb) < needed)) { 6026f00a022SMaxime Bizon struct sk_buff *nskb; 6036f00a022SMaxime Bizon 6046f00a022SMaxime Bizon nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC); 6056f00a022SMaxime Bizon if (!nskb) { 6066f00a022SMaxime Bizon ret = NETDEV_TX_BUSY; 6076f00a022SMaxime Bizon goto out_unlock; 6086f00a022SMaxime Bizon } 6096f00a022SMaxime Bizon dev_kfree_skb(skb); 6106f00a022SMaxime Bizon skb = nskb; 6116f00a022SMaxime Bizon } 6126f00a022SMaxime Bizon data = skb_put(skb, needed); 6136f00a022SMaxime Bizon memset(data, 0, needed); 6146f00a022SMaxime Bizon } 6156f00a022SMaxime Bizon 
	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor
	 * NOTE(review): dma_map_single() result is not checked with
	 * dma_mapping_error() here — confirm whether this platform can fail
	 * the mapping */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	/* advance ring index; set WRAP on the last descriptor so the DMA
	 * engine loops back to the start of the ring */
	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order: address must be visible before the
	 * OWNER bit hands the descriptor to hardware */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}

/*
 * Change the interface's mac address.
 *
 * Stores the new address in perfect match register pair 0: PML holds
 * the low four bytes, PMH the top two bytes plus the DATAVALID bit.
 * Always returns 0; the address is not validated here.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list.
 *
 * Falls back to hardware all-multicast mode when IFF_ALLMULTI is set or
 * more than 3 multicast addresses are requested, since only perfect
 * match registers 1-3 are free (register 0 holds the unicast address).
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers (same PML/PMH split as
		 * in bcm_enet_set_mac_address) */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	/* invalidate any leftover entries from a previous configuration */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters (read-modify-write of the FD bit in TXCTL)
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 *
 * rx_en controls pause-frame handling in the MAC; tx_en controls
 * pause-frame generation in the DMA block, which only exists on
 * controllers with SRAM-backed DMA (dma_has_sram).
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx pause generation is only available on DMA blocks with SRAM */
	if (!priv->dma_has_sram)
		return;

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 *
 * Mirrors PHY state (link, duplex, pause) into the MAC configuration,
 * caching the previous values in priv->old_* so hardware is only
 * touched when something actually changed; logs a summary on change.
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 *
 * Applies the forced duplex/flow-control settings from platform data
 * and unconditionally reports carrier up.
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ?
"tx" : "off"); 863adfc5217SJeff Kirsher } 864adfc5217SJeff Kirsher 865adfc5217SJeff Kirsher /* 866adfc5217SJeff Kirsher * open callback, allocate dma rings & buffers and start rx operation 867adfc5217SJeff Kirsher */ 868adfc5217SJeff Kirsher static int bcm_enet_open(struct net_device *dev) 869adfc5217SJeff Kirsher { 870adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 871adfc5217SJeff Kirsher struct sockaddr addr; 872adfc5217SJeff Kirsher struct device *kdev; 873adfc5217SJeff Kirsher struct phy_device *phydev; 874adfc5217SJeff Kirsher int i, ret; 875adfc5217SJeff Kirsher unsigned int size; 876adfc5217SJeff Kirsher char phy_id[MII_BUS_ID_SIZE + 3]; 877adfc5217SJeff Kirsher void *p; 878adfc5217SJeff Kirsher u32 val; 879adfc5217SJeff Kirsher 880adfc5217SJeff Kirsher priv = netdev_priv(dev); 881adfc5217SJeff Kirsher kdev = &priv->pdev->dev; 882adfc5217SJeff Kirsher 883adfc5217SJeff Kirsher if (priv->has_phy) { 884adfc5217SJeff Kirsher /* connect to PHY */ 885adfc5217SJeff Kirsher snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 886c56e9e2aSFlorian Fainelli priv->mii_bus->id, priv->phy_id); 887adfc5217SJeff Kirsher 888f9a8f83bSFlorian Fainelli phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 889adfc5217SJeff Kirsher PHY_INTERFACE_MODE_MII); 890adfc5217SJeff Kirsher 891adfc5217SJeff Kirsher if (IS_ERR(phydev)) { 892adfc5217SJeff Kirsher dev_err(kdev, "could not attach to PHY\n"); 893adfc5217SJeff Kirsher return PTR_ERR(phydev); 894adfc5217SJeff Kirsher } 895adfc5217SJeff Kirsher 896adfc5217SJeff Kirsher /* mask with MAC supported features */ 897adfc5217SJeff Kirsher phydev->supported &= (SUPPORTED_10baseT_Half | 898adfc5217SJeff Kirsher SUPPORTED_10baseT_Full | 899adfc5217SJeff Kirsher SUPPORTED_100baseT_Half | 900adfc5217SJeff Kirsher SUPPORTED_100baseT_Full | 901adfc5217SJeff Kirsher SUPPORTED_Autoneg | 902adfc5217SJeff Kirsher SUPPORTED_Pause | 903adfc5217SJeff Kirsher SUPPORTED_MII); 904adfc5217SJeff Kirsher phydev->advertising = phydev->supported; 
905adfc5217SJeff Kirsher 906adfc5217SJeff Kirsher if (priv->pause_auto && priv->pause_rx && priv->pause_tx) 907adfc5217SJeff Kirsher phydev->advertising |= SUPPORTED_Pause; 908adfc5217SJeff Kirsher else 909adfc5217SJeff Kirsher phydev->advertising &= ~SUPPORTED_Pause; 910adfc5217SJeff Kirsher 911adfc5217SJeff Kirsher dev_info(kdev, "attached PHY at address %d [%s]\n", 912adfc5217SJeff Kirsher phydev->addr, phydev->drv->name); 913adfc5217SJeff Kirsher 914adfc5217SJeff Kirsher priv->old_link = 0; 915adfc5217SJeff Kirsher priv->old_duplex = -1; 916adfc5217SJeff Kirsher priv->old_pause = -1; 917adfc5217SJeff Kirsher priv->phydev = phydev; 918adfc5217SJeff Kirsher } 919adfc5217SJeff Kirsher 920adfc5217SJeff Kirsher /* mask all interrupts and request them */ 921adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_IRMASK_REG); 9223dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 9233dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 924adfc5217SJeff Kirsher 925adfc5217SJeff Kirsher ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); 926adfc5217SJeff Kirsher if (ret) 927adfc5217SJeff Kirsher goto out_phy_disconnect; 928adfc5217SJeff Kirsher 929df9f1b9fSMichael Opdenacker ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0, 930adfc5217SJeff Kirsher dev->name, dev); 931adfc5217SJeff Kirsher if (ret) 932adfc5217SJeff Kirsher goto out_freeirq; 933adfc5217SJeff Kirsher 934adfc5217SJeff Kirsher ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 935df9f1b9fSMichael Opdenacker 0, dev->name, dev); 936adfc5217SJeff Kirsher if (ret) 937adfc5217SJeff Kirsher goto out_freeirq_rx; 938adfc5217SJeff Kirsher 939adfc5217SJeff Kirsher /* initialize perfect match registers */ 940adfc5217SJeff Kirsher for (i = 0; i < 4; i++) { 941adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_PML_REG(i)); 942adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_PMH_REG(i)); 943adfc5217SJeff Kirsher } 944adfc5217SJeff Kirsher 
945adfc5217SJeff Kirsher /* write device mac address */ 946adfc5217SJeff Kirsher memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); 947adfc5217SJeff Kirsher bcm_enet_set_mac_address(dev, &addr); 948adfc5217SJeff Kirsher 949adfc5217SJeff Kirsher /* allocate rx dma ring */ 950adfc5217SJeff Kirsher size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 951ede23fa8SJoe Perches p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 952adfc5217SJeff Kirsher if (!p) { 953adfc5217SJeff Kirsher ret = -ENOMEM; 954adfc5217SJeff Kirsher goto out_freeirq_tx; 955adfc5217SJeff Kirsher } 956adfc5217SJeff Kirsher 957adfc5217SJeff Kirsher priv->rx_desc_alloc_size = size; 958adfc5217SJeff Kirsher priv->rx_desc_cpu = p; 959adfc5217SJeff Kirsher 960adfc5217SJeff Kirsher /* allocate tx dma ring */ 961adfc5217SJeff Kirsher size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 962ede23fa8SJoe Perches p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 963adfc5217SJeff Kirsher if (!p) { 964adfc5217SJeff Kirsher ret = -ENOMEM; 965adfc5217SJeff Kirsher goto out_free_rx_ring; 966adfc5217SJeff Kirsher } 967adfc5217SJeff Kirsher 968adfc5217SJeff Kirsher priv->tx_desc_alloc_size = size; 969adfc5217SJeff Kirsher priv->tx_desc_cpu = p; 970adfc5217SJeff Kirsher 971b2adaca9SJoe Perches priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), 972adfc5217SJeff Kirsher GFP_KERNEL); 973adfc5217SJeff Kirsher if (!priv->tx_skb) { 974adfc5217SJeff Kirsher ret = -ENOMEM; 975adfc5217SJeff Kirsher goto out_free_tx_ring; 976adfc5217SJeff Kirsher } 977adfc5217SJeff Kirsher 978adfc5217SJeff Kirsher priv->tx_desc_count = priv->tx_ring_size; 979adfc5217SJeff Kirsher priv->tx_dirty_desc = 0; 980adfc5217SJeff Kirsher priv->tx_curr_desc = 0; 981adfc5217SJeff Kirsher spin_lock_init(&priv->tx_lock); 982adfc5217SJeff Kirsher 983adfc5217SJeff Kirsher /* init & fill rx ring with skbs */ 984b2adaca9SJoe Perches priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff 
*), 985adfc5217SJeff Kirsher GFP_KERNEL); 986adfc5217SJeff Kirsher if (!priv->rx_skb) { 987adfc5217SJeff Kirsher ret = -ENOMEM; 988adfc5217SJeff Kirsher goto out_free_tx_skb; 989adfc5217SJeff Kirsher } 990adfc5217SJeff Kirsher 991adfc5217SJeff Kirsher priv->rx_desc_count = 0; 992adfc5217SJeff Kirsher priv->rx_dirty_desc = 0; 993adfc5217SJeff Kirsher priv->rx_curr_desc = 0; 994adfc5217SJeff Kirsher 995adfc5217SJeff Kirsher /* initialize flow control buffer allocation */ 9963dc6475cSFlorian Fainelli if (priv->dma_has_sram) 997adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 998adfc5217SJeff Kirsher ENETDMA_BUFALLOC_REG(priv->rx_chan)); 9993dc6475cSFlorian Fainelli else 10003dc6475cSFlorian Fainelli enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 10013dc6475cSFlorian Fainelli ENETDMAC_BUFALLOC, priv->rx_chan); 1002adfc5217SJeff Kirsher 1003adfc5217SJeff Kirsher if (bcm_enet_refill_rx(dev)) { 1004adfc5217SJeff Kirsher dev_err(kdev, "cannot allocate rx skb queue\n"); 1005adfc5217SJeff Kirsher ret = -ENOMEM; 1006adfc5217SJeff Kirsher goto out; 1007adfc5217SJeff Kirsher } 1008adfc5217SJeff Kirsher 1009adfc5217SJeff Kirsher /* write rx & tx ring addresses */ 10103dc6475cSFlorian Fainelli if (priv->dma_has_sram) { 10110ae99b5fSMaxime Bizon enet_dmas_writel(priv, priv->rx_desc_dma, 10123dc6475cSFlorian Fainelli ENETDMAS_RSTART_REG, priv->rx_chan); 10130ae99b5fSMaxime Bizon enet_dmas_writel(priv, priv->tx_desc_dma, 10143dc6475cSFlorian Fainelli ENETDMAS_RSTART_REG, priv->tx_chan); 10153dc6475cSFlorian Fainelli } else { 10163dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->rx_desc_dma, 10173dc6475cSFlorian Fainelli ENETDMAC_RSTART, priv->rx_chan); 10183dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->tx_desc_dma, 10193dc6475cSFlorian Fainelli ENETDMAC_RSTART, priv->tx_chan); 10203dc6475cSFlorian Fainelli } 1021adfc5217SJeff Kirsher 1022adfc5217SJeff Kirsher /* clear remaining state ram for rx & tx channel */ 10233dc6475cSFlorian 
Fainelli if (priv->dma_has_sram) { 10243dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); 10253dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); 10263dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); 10273dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); 10283dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); 10293dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); 10303dc6475cSFlorian Fainelli } else { 10313dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); 10323dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); 10333dc6475cSFlorian Fainelli } 1034adfc5217SJeff Kirsher 1035adfc5217SJeff Kirsher /* set max rx/tx length */ 1036adfc5217SJeff Kirsher enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); 1037adfc5217SJeff Kirsher enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); 1038adfc5217SJeff Kirsher 1039adfc5217SJeff Kirsher /* set dma maximum burst len */ 10406f00a022SMaxime Bizon enet_dmac_writel(priv, priv->dma_maxburst, 10413dc6475cSFlorian Fainelli ENETDMAC_MAXBURST, priv->rx_chan); 10426f00a022SMaxime Bizon enet_dmac_writel(priv, priv->dma_maxburst, 10433dc6475cSFlorian Fainelli ENETDMAC_MAXBURST, priv->tx_chan); 1044adfc5217SJeff Kirsher 1045adfc5217SJeff Kirsher /* set correct transmit fifo watermark */ 1046adfc5217SJeff Kirsher enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); 1047adfc5217SJeff Kirsher 1048adfc5217SJeff Kirsher /* set flow control low/high threshold to 1/3 / 2/3 */ 10493dc6475cSFlorian Fainelli if (priv->dma_has_sram) { 1050adfc5217SJeff Kirsher val = priv->rx_ring_size / 3; 1051adfc5217SJeff Kirsher enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); 1052adfc5217SJeff Kirsher val = (priv->rx_ring_size * 2) / 3; 1053adfc5217SJeff Kirsher 
enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); 10543dc6475cSFlorian Fainelli } else { 10553dc6475cSFlorian Fainelli enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); 10563dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); 10573dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); 10583dc6475cSFlorian Fainelli } 1059adfc5217SJeff Kirsher 1060adfc5217SJeff Kirsher /* all set, enable mac and interrupts, start dma engine and 1061adfc5217SJeff Kirsher * kick rx dma channel */ 1062adfc5217SJeff Kirsher wmb(); 1063adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1064adfc5217SJeff Kirsher val |= ENET_CTL_ENABLE_MASK; 1065adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1066adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 10673dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_en_mask, 10683dc6475cSFlorian Fainelli ENETDMAC_CHANCFG, priv->rx_chan); 1069adfc5217SJeff Kirsher 1070adfc5217SJeff Kirsher /* watch "mib counters about to overflow" interrupt */ 1071adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 1072adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1073adfc5217SJeff Kirsher 1074adfc5217SJeff Kirsher /* watch "packet transferred" interrupt in rx and tx */ 10753dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 10763dc6475cSFlorian Fainelli ENETDMAC_IR, priv->rx_chan); 10773dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 10783dc6475cSFlorian Fainelli ENETDMAC_IR, priv->tx_chan); 1079adfc5217SJeff Kirsher 1080adfc5217SJeff Kirsher /* make sure we enable napi before rx interrupt */ 1081adfc5217SJeff Kirsher napi_enable(&priv->napi); 1082adfc5217SJeff Kirsher 10833dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 10843dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->rx_chan); 
10853dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 10863dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->tx_chan); 1087adfc5217SJeff Kirsher 1088adfc5217SJeff Kirsher if (priv->has_phy) 1089adfc5217SJeff Kirsher phy_start(priv->phydev); 1090adfc5217SJeff Kirsher else 1091adfc5217SJeff Kirsher bcm_enet_adjust_link(dev); 1092adfc5217SJeff Kirsher 1093adfc5217SJeff Kirsher netif_start_queue(dev); 1094adfc5217SJeff Kirsher return 0; 1095adfc5217SJeff Kirsher 1096adfc5217SJeff Kirsher out: 1097adfc5217SJeff Kirsher for (i = 0; i < priv->rx_ring_size; i++) { 1098adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 1099adfc5217SJeff Kirsher 1100adfc5217SJeff Kirsher if (!priv->rx_skb[i]) 1101adfc5217SJeff Kirsher continue; 1102adfc5217SJeff Kirsher 1103adfc5217SJeff Kirsher desc = &priv->rx_desc_cpu[i]; 1104adfc5217SJeff Kirsher dma_unmap_single(kdev, desc->address, priv->rx_skb_size, 1105adfc5217SJeff Kirsher DMA_FROM_DEVICE); 1106adfc5217SJeff Kirsher kfree_skb(priv->rx_skb[i]); 1107adfc5217SJeff Kirsher } 1108adfc5217SJeff Kirsher kfree(priv->rx_skb); 1109adfc5217SJeff Kirsher 1110adfc5217SJeff Kirsher out_free_tx_skb: 1111adfc5217SJeff Kirsher kfree(priv->tx_skb); 1112adfc5217SJeff Kirsher 1113adfc5217SJeff Kirsher out_free_tx_ring: 1114adfc5217SJeff Kirsher dma_free_coherent(kdev, priv->tx_desc_alloc_size, 1115adfc5217SJeff Kirsher priv->tx_desc_cpu, priv->tx_desc_dma); 1116adfc5217SJeff Kirsher 1117adfc5217SJeff Kirsher out_free_rx_ring: 1118adfc5217SJeff Kirsher dma_free_coherent(kdev, priv->rx_desc_alloc_size, 1119adfc5217SJeff Kirsher priv->rx_desc_cpu, priv->rx_desc_dma); 1120adfc5217SJeff Kirsher 1121adfc5217SJeff Kirsher out_freeirq_tx: 1122adfc5217SJeff Kirsher free_irq(priv->irq_tx, dev); 1123adfc5217SJeff Kirsher 1124adfc5217SJeff Kirsher out_freeirq_rx: 1125adfc5217SJeff Kirsher free_irq(priv->irq_rx, dev); 1126adfc5217SJeff Kirsher 1127adfc5217SJeff Kirsher out_freeirq: 1128adfc5217SJeff Kirsher free_irq(dev->irq, dev); 
1129adfc5217SJeff Kirsher 1130adfc5217SJeff Kirsher out_phy_disconnect: 1131adfc5217SJeff Kirsher phy_disconnect(priv->phydev); 1132adfc5217SJeff Kirsher 1133adfc5217SJeff Kirsher return ret; 1134adfc5217SJeff Kirsher } 1135adfc5217SJeff Kirsher 1136adfc5217SJeff Kirsher /* 1137adfc5217SJeff Kirsher * disable mac 1138adfc5217SJeff Kirsher */ 1139adfc5217SJeff Kirsher static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) 1140adfc5217SJeff Kirsher { 1141adfc5217SJeff Kirsher int limit; 1142adfc5217SJeff Kirsher u32 val; 1143adfc5217SJeff Kirsher 1144adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1145adfc5217SJeff Kirsher val |= ENET_CTL_DISABLE_MASK; 1146adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1147adfc5217SJeff Kirsher 1148adfc5217SJeff Kirsher limit = 1000; 1149adfc5217SJeff Kirsher do { 1150adfc5217SJeff Kirsher u32 val; 1151adfc5217SJeff Kirsher 1152adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1153adfc5217SJeff Kirsher if (!(val & ENET_CTL_DISABLE_MASK)) 1154adfc5217SJeff Kirsher break; 1155adfc5217SJeff Kirsher udelay(1); 1156adfc5217SJeff Kirsher } while (limit--); 1157adfc5217SJeff Kirsher } 1158adfc5217SJeff Kirsher 1159adfc5217SJeff Kirsher /* 1160adfc5217SJeff Kirsher * disable dma in given channel 1161adfc5217SJeff Kirsher */ 1162adfc5217SJeff Kirsher static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) 1163adfc5217SJeff Kirsher { 1164adfc5217SJeff Kirsher int limit; 1165adfc5217SJeff Kirsher 11663dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); 1167adfc5217SJeff Kirsher 1168adfc5217SJeff Kirsher limit = 1000; 1169adfc5217SJeff Kirsher do { 1170adfc5217SJeff Kirsher u32 val; 1171adfc5217SJeff Kirsher 11723dc6475cSFlorian Fainelli val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); 11730ae99b5fSMaxime Bizon if (!(val & ENETDMAC_CHANCFG_EN_MASK)) 1174adfc5217SJeff Kirsher break; 1175adfc5217SJeff Kirsher udelay(1); 1176adfc5217SJeff Kirsher } while (limit--); 
}

/*
 * stop callback
 *
 * Tears down everything bcm_enet_open() set up, in reverse order:
 * stop queue/napi/phy, mask interrupts, cancel the mib worker, stop
 * DMA and MAC, reclaim tx buffers, then free rings, skb arrays, irqs
 * and finally the PHY.  Always returns 0.
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * ethtool callbacks
 */

/* one ethtool statistic: its display name, the size and offset of the
 * backing field (in bcm_enet_priv for MIB stats, net_device_stats for
 * software stats), and the hardware MIB register index (-1 when the
 * value is software-only) */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

/* expand to the (sizeof_stat, stat_offset) initializer pair for a
 * field in bcm_enet_priv / net_device_stats respectively */
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
		     offsetof(struct net_device_stats, m)

/* table driving ethtool -S: software counters first (mib_reg == -1),
 * then hardware MIB counters accumulated by update_mib_counters() */
static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

/* MIB registers that are read only to drain the hardware counters,
 * not exported through ethtool */
static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};


/* ethtool get_drvinfo: report static driver identification strings */
static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, bcm_enet_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

/* ethtool get_sset_count: only the statistics string set is supported */
static int bcm_enet_get_sset_count(struct net_device *netdev,
					int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

/* ethtool get_strings: copy the statistic names into the user buffer,
 * one ETH_GSTRING_LEN-sized slot per entry */
static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

/* accumulate the hardware MIB registers into the driver's counters;
 * reading ENET_MIB_REG presumably clears the hardware counter, since
 * values are added rather than copied — behavior continues past this
 * chunk */
static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		/* 64-bit fields accumulate without wrapping; others are u32 */
		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters
to make sure mib counter 1389adfc5217SJeff Kirsher * overflow interrupt is cleared */ 1390adfc5217SJeff Kirsher for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++) 1391adfc5217SJeff Kirsher (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); 1392adfc5217SJeff Kirsher } 1393adfc5217SJeff Kirsher 1394adfc5217SJeff Kirsher static void bcm_enet_update_mib_counters_defer(struct work_struct *t) 1395adfc5217SJeff Kirsher { 1396adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1397adfc5217SJeff Kirsher 1398adfc5217SJeff Kirsher priv = container_of(t, struct bcm_enet_priv, mib_update_task); 1399adfc5217SJeff Kirsher mutex_lock(&priv->mib_update_lock); 1400adfc5217SJeff Kirsher update_mib_counters(priv); 1401adfc5217SJeff Kirsher mutex_unlock(&priv->mib_update_lock); 1402adfc5217SJeff Kirsher 1403adfc5217SJeff Kirsher /* reenable mib interrupt */ 1404adfc5217SJeff Kirsher if (netif_running(priv->net_dev)) 1405adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1406adfc5217SJeff Kirsher } 1407adfc5217SJeff Kirsher 1408adfc5217SJeff Kirsher static void bcm_enet_get_ethtool_stats(struct net_device *netdev, 1409adfc5217SJeff Kirsher struct ethtool_stats *stats, 1410adfc5217SJeff Kirsher u64 *data) 1411adfc5217SJeff Kirsher { 1412adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1413adfc5217SJeff Kirsher int i; 1414adfc5217SJeff Kirsher 1415adfc5217SJeff Kirsher priv = netdev_priv(netdev); 1416adfc5217SJeff Kirsher 1417adfc5217SJeff Kirsher mutex_lock(&priv->mib_update_lock); 1418adfc5217SJeff Kirsher update_mib_counters(priv); 1419adfc5217SJeff Kirsher 1420adfc5217SJeff Kirsher for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1421adfc5217SJeff Kirsher const struct bcm_enet_stats *s; 1422adfc5217SJeff Kirsher char *p; 1423adfc5217SJeff Kirsher 1424adfc5217SJeff Kirsher s = &bcm_enet_gstrings_stats[i]; 1425adfc5217SJeff Kirsher if (s->mib_reg == -1) 1426adfc5217SJeff Kirsher p = (char *)&netdev->stats; 1427adfc5217SJeff Kirsher else 1428adfc5217SJeff Kirsher p = 
(char *)priv; 1429adfc5217SJeff Kirsher p += s->stat_offset; 1430adfc5217SJeff Kirsher data[i] = (s->sizeof_stat == sizeof(u64)) ? 1431adfc5217SJeff Kirsher *(u64 *)p : *(u32 *)p; 1432adfc5217SJeff Kirsher } 1433adfc5217SJeff Kirsher mutex_unlock(&priv->mib_update_lock); 1434adfc5217SJeff Kirsher } 1435adfc5217SJeff Kirsher 14367260aac9SMaxime Bizon static int bcm_enet_nway_reset(struct net_device *dev) 14377260aac9SMaxime Bizon { 14387260aac9SMaxime Bizon struct bcm_enet_priv *priv; 14397260aac9SMaxime Bizon 14407260aac9SMaxime Bizon priv = netdev_priv(dev); 14417260aac9SMaxime Bizon if (priv->has_phy) { 14427260aac9SMaxime Bizon if (!priv->phydev) 14437260aac9SMaxime Bizon return -ENODEV; 14447260aac9SMaxime Bizon return genphy_restart_aneg(priv->phydev); 14457260aac9SMaxime Bizon } 14467260aac9SMaxime Bizon 14477260aac9SMaxime Bizon return -EOPNOTSUPP; 14487260aac9SMaxime Bizon } 14497260aac9SMaxime Bizon 1450adfc5217SJeff Kirsher static int bcm_enet_get_settings(struct net_device *dev, 1451adfc5217SJeff Kirsher struct ethtool_cmd *cmd) 1452adfc5217SJeff Kirsher { 1453adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1454adfc5217SJeff Kirsher 1455adfc5217SJeff Kirsher priv = netdev_priv(dev); 1456adfc5217SJeff Kirsher 1457adfc5217SJeff Kirsher cmd->maxrxpkt = 0; 1458adfc5217SJeff Kirsher cmd->maxtxpkt = 0; 1459adfc5217SJeff Kirsher 1460adfc5217SJeff Kirsher if (priv->has_phy) { 1461adfc5217SJeff Kirsher if (!priv->phydev) 1462adfc5217SJeff Kirsher return -ENODEV; 1463adfc5217SJeff Kirsher return phy_ethtool_gset(priv->phydev, cmd); 1464adfc5217SJeff Kirsher } else { 1465adfc5217SJeff Kirsher cmd->autoneg = 0; 1466adfc5217SJeff Kirsher ethtool_cmd_speed_set(cmd, ((priv->force_speed_100) 1467adfc5217SJeff Kirsher ? SPEED_100 : SPEED_10)); 1468adfc5217SJeff Kirsher cmd->duplex = (priv->force_duplex_full) ? 
1469adfc5217SJeff Kirsher DUPLEX_FULL : DUPLEX_HALF; 1470adfc5217SJeff Kirsher cmd->supported = ADVERTISED_10baseT_Half | 1471adfc5217SJeff Kirsher ADVERTISED_10baseT_Full | 1472adfc5217SJeff Kirsher ADVERTISED_100baseT_Half | 1473adfc5217SJeff Kirsher ADVERTISED_100baseT_Full; 1474adfc5217SJeff Kirsher cmd->advertising = 0; 1475adfc5217SJeff Kirsher cmd->port = PORT_MII; 1476adfc5217SJeff Kirsher cmd->transceiver = XCVR_EXTERNAL; 1477adfc5217SJeff Kirsher } 1478adfc5217SJeff Kirsher return 0; 1479adfc5217SJeff Kirsher } 1480adfc5217SJeff Kirsher 1481adfc5217SJeff Kirsher static int bcm_enet_set_settings(struct net_device *dev, 1482adfc5217SJeff Kirsher struct ethtool_cmd *cmd) 1483adfc5217SJeff Kirsher { 1484adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1485adfc5217SJeff Kirsher 1486adfc5217SJeff Kirsher priv = netdev_priv(dev); 1487adfc5217SJeff Kirsher if (priv->has_phy) { 1488adfc5217SJeff Kirsher if (!priv->phydev) 1489adfc5217SJeff Kirsher return -ENODEV; 1490adfc5217SJeff Kirsher return phy_ethtool_sset(priv->phydev, cmd); 1491adfc5217SJeff Kirsher } else { 1492adfc5217SJeff Kirsher 1493adfc5217SJeff Kirsher if (cmd->autoneg || 1494adfc5217SJeff Kirsher (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) || 1495adfc5217SJeff Kirsher cmd->port != PORT_MII) 1496adfc5217SJeff Kirsher return -EINVAL; 1497adfc5217SJeff Kirsher 1498adfc5217SJeff Kirsher priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0; 1499adfc5217SJeff Kirsher priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 
1 : 0; 1500adfc5217SJeff Kirsher 1501adfc5217SJeff Kirsher if (netif_running(dev)) 1502adfc5217SJeff Kirsher bcm_enet_adjust_link(dev); 1503adfc5217SJeff Kirsher return 0; 1504adfc5217SJeff Kirsher } 1505adfc5217SJeff Kirsher } 1506adfc5217SJeff Kirsher 1507adfc5217SJeff Kirsher static void bcm_enet_get_ringparam(struct net_device *dev, 1508adfc5217SJeff Kirsher struct ethtool_ringparam *ering) 1509adfc5217SJeff Kirsher { 1510adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1511adfc5217SJeff Kirsher 1512adfc5217SJeff Kirsher priv = netdev_priv(dev); 1513adfc5217SJeff Kirsher 1514adfc5217SJeff Kirsher /* rx/tx ring is actually only limited by memory */ 1515adfc5217SJeff Kirsher ering->rx_max_pending = 8192; 1516adfc5217SJeff Kirsher ering->tx_max_pending = 8192; 1517adfc5217SJeff Kirsher ering->rx_pending = priv->rx_ring_size; 1518adfc5217SJeff Kirsher ering->tx_pending = priv->tx_ring_size; 1519adfc5217SJeff Kirsher } 1520adfc5217SJeff Kirsher 1521adfc5217SJeff Kirsher static int bcm_enet_set_ringparam(struct net_device *dev, 1522adfc5217SJeff Kirsher struct ethtool_ringparam *ering) 1523adfc5217SJeff Kirsher { 1524adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1525adfc5217SJeff Kirsher int was_running; 1526adfc5217SJeff Kirsher 1527adfc5217SJeff Kirsher priv = netdev_priv(dev); 1528adfc5217SJeff Kirsher 1529adfc5217SJeff Kirsher was_running = 0; 1530adfc5217SJeff Kirsher if (netif_running(dev)) { 1531adfc5217SJeff Kirsher bcm_enet_stop(dev); 1532adfc5217SJeff Kirsher was_running = 1; 1533adfc5217SJeff Kirsher } 1534adfc5217SJeff Kirsher 1535adfc5217SJeff Kirsher priv->rx_ring_size = ering->rx_pending; 1536adfc5217SJeff Kirsher priv->tx_ring_size = ering->tx_pending; 1537adfc5217SJeff Kirsher 1538adfc5217SJeff Kirsher if (was_running) { 1539adfc5217SJeff Kirsher int err; 1540adfc5217SJeff Kirsher 1541adfc5217SJeff Kirsher err = bcm_enet_open(dev); 1542adfc5217SJeff Kirsher if (err) 1543adfc5217SJeff Kirsher dev_close(dev); 1544adfc5217SJeff Kirsher else 
1545adfc5217SJeff Kirsher bcm_enet_set_multicast_list(dev); 1546adfc5217SJeff Kirsher } 1547adfc5217SJeff Kirsher return 0; 1548adfc5217SJeff Kirsher } 1549adfc5217SJeff Kirsher 1550adfc5217SJeff Kirsher static void bcm_enet_get_pauseparam(struct net_device *dev, 1551adfc5217SJeff Kirsher struct ethtool_pauseparam *ecmd) 1552adfc5217SJeff Kirsher { 1553adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1554adfc5217SJeff Kirsher 1555adfc5217SJeff Kirsher priv = netdev_priv(dev); 1556adfc5217SJeff Kirsher ecmd->autoneg = priv->pause_auto; 1557adfc5217SJeff Kirsher ecmd->rx_pause = priv->pause_rx; 1558adfc5217SJeff Kirsher ecmd->tx_pause = priv->pause_tx; 1559adfc5217SJeff Kirsher } 1560adfc5217SJeff Kirsher 1561adfc5217SJeff Kirsher static int bcm_enet_set_pauseparam(struct net_device *dev, 1562adfc5217SJeff Kirsher struct ethtool_pauseparam *ecmd) 1563adfc5217SJeff Kirsher { 1564adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1565adfc5217SJeff Kirsher 1566adfc5217SJeff Kirsher priv = netdev_priv(dev); 1567adfc5217SJeff Kirsher 1568adfc5217SJeff Kirsher if (priv->has_phy) { 1569adfc5217SJeff Kirsher if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { 1570adfc5217SJeff Kirsher /* asymetric pause mode not supported, 1571adfc5217SJeff Kirsher * actually possible but integrated PHY has RO 1572adfc5217SJeff Kirsher * asym_pause bit */ 1573adfc5217SJeff Kirsher return -EINVAL; 1574adfc5217SJeff Kirsher } 1575adfc5217SJeff Kirsher } else { 1576adfc5217SJeff Kirsher /* no pause autoneg on direct mii connection */ 1577adfc5217SJeff Kirsher if (ecmd->autoneg) 1578adfc5217SJeff Kirsher return -EINVAL; 1579adfc5217SJeff Kirsher } 1580adfc5217SJeff Kirsher 1581adfc5217SJeff Kirsher priv->pause_auto = ecmd->autoneg; 1582adfc5217SJeff Kirsher priv->pause_rx = ecmd->rx_pause; 1583adfc5217SJeff Kirsher priv->pause_tx = ecmd->tx_pause; 1584adfc5217SJeff Kirsher 1585adfc5217SJeff Kirsher return 0; 1586adfc5217SJeff Kirsher } 1587adfc5217SJeff Kirsher 15881aff0cbeSstephen 
hemminger static const struct ethtool_ops bcm_enet_ethtool_ops = { 1589adfc5217SJeff Kirsher .get_strings = bcm_enet_get_strings, 1590adfc5217SJeff Kirsher .get_sset_count = bcm_enet_get_sset_count, 1591adfc5217SJeff Kirsher .get_ethtool_stats = bcm_enet_get_ethtool_stats, 15927260aac9SMaxime Bizon .nway_reset = bcm_enet_nway_reset, 1593adfc5217SJeff Kirsher .get_settings = bcm_enet_get_settings, 1594adfc5217SJeff Kirsher .set_settings = bcm_enet_set_settings, 1595adfc5217SJeff Kirsher .get_drvinfo = bcm_enet_get_drvinfo, 1596adfc5217SJeff Kirsher .get_link = ethtool_op_get_link, 1597adfc5217SJeff Kirsher .get_ringparam = bcm_enet_get_ringparam, 1598adfc5217SJeff Kirsher .set_ringparam = bcm_enet_set_ringparam, 1599adfc5217SJeff Kirsher .get_pauseparam = bcm_enet_get_pauseparam, 1600adfc5217SJeff Kirsher .set_pauseparam = bcm_enet_set_pauseparam, 1601adfc5217SJeff Kirsher }; 1602adfc5217SJeff Kirsher 1603adfc5217SJeff Kirsher static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1604adfc5217SJeff Kirsher { 1605adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1606adfc5217SJeff Kirsher 1607adfc5217SJeff Kirsher priv = netdev_priv(dev); 1608adfc5217SJeff Kirsher if (priv->has_phy) { 1609adfc5217SJeff Kirsher if (!priv->phydev) 1610adfc5217SJeff Kirsher return -ENODEV; 1611adfc5217SJeff Kirsher return phy_mii_ioctl(priv->phydev, rq, cmd); 1612adfc5217SJeff Kirsher } else { 1613adfc5217SJeff Kirsher struct mii_if_info mii; 1614adfc5217SJeff Kirsher 1615adfc5217SJeff Kirsher mii.dev = dev; 1616adfc5217SJeff Kirsher mii.mdio_read = bcm_enet_mdio_read_mii; 1617adfc5217SJeff Kirsher mii.mdio_write = bcm_enet_mdio_write_mii; 1618adfc5217SJeff Kirsher mii.phy_id = 0; 1619adfc5217SJeff Kirsher mii.phy_id_mask = 0x3f; 1620adfc5217SJeff Kirsher mii.reg_num_mask = 0x1f; 1621adfc5217SJeff Kirsher return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); 1622adfc5217SJeff Kirsher } 1623adfc5217SJeff Kirsher } 1624adfc5217SJeff Kirsher 1625adfc5217SJeff Kirsher 
/* 1626adfc5217SJeff Kirsher * calculate actual hardware mtu 1627adfc5217SJeff Kirsher */ 1628adfc5217SJeff Kirsher static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) 1629adfc5217SJeff Kirsher { 1630adfc5217SJeff Kirsher int actual_mtu; 1631adfc5217SJeff Kirsher 1632adfc5217SJeff Kirsher actual_mtu = mtu; 1633adfc5217SJeff Kirsher 1634adfc5217SJeff Kirsher /* add ethernet header + vlan tag size */ 1635adfc5217SJeff Kirsher actual_mtu += VLAN_ETH_HLEN; 1636adfc5217SJeff Kirsher 1637adfc5217SJeff Kirsher if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU) 1638adfc5217SJeff Kirsher return -EINVAL; 1639adfc5217SJeff Kirsher 1640adfc5217SJeff Kirsher /* 1641adfc5217SJeff Kirsher * setup maximum size before we get overflow mark in 1642adfc5217SJeff Kirsher * descriptor, note that this will not prevent reception of 1643adfc5217SJeff Kirsher * big frames, they will be split into multiple buffers 1644adfc5217SJeff Kirsher * anyway 1645adfc5217SJeff Kirsher */ 1646adfc5217SJeff Kirsher priv->hw_mtu = actual_mtu; 1647adfc5217SJeff Kirsher 1648adfc5217SJeff Kirsher /* 1649adfc5217SJeff Kirsher * align rx buffer size to dma burst len, account FCS since 1650adfc5217SJeff Kirsher * it's appended 1651adfc5217SJeff Kirsher */ 1652adfc5217SJeff Kirsher priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, 16536f00a022SMaxime Bizon priv->dma_maxburst * 4); 1654adfc5217SJeff Kirsher return 0; 1655adfc5217SJeff Kirsher } 1656adfc5217SJeff Kirsher 1657adfc5217SJeff Kirsher /* 1658adfc5217SJeff Kirsher * adjust mtu, can't be called while device is running 1659adfc5217SJeff Kirsher */ 1660adfc5217SJeff Kirsher static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) 1661adfc5217SJeff Kirsher { 1662adfc5217SJeff Kirsher int ret; 1663adfc5217SJeff Kirsher 1664adfc5217SJeff Kirsher if (netif_running(dev)) 1665adfc5217SJeff Kirsher return -EBUSY; 1666adfc5217SJeff Kirsher 1667adfc5217SJeff Kirsher ret = compute_hw_mtu(netdev_priv(dev), new_mtu); 1668adfc5217SJeff Kirsher 
if (ret) 1669adfc5217SJeff Kirsher return ret; 1670adfc5217SJeff Kirsher dev->mtu = new_mtu; 1671adfc5217SJeff Kirsher return 0; 1672adfc5217SJeff Kirsher } 1673adfc5217SJeff Kirsher 1674adfc5217SJeff Kirsher /* 1675adfc5217SJeff Kirsher * preinit hardware to allow mii operation while device is down 1676adfc5217SJeff Kirsher */ 1677adfc5217SJeff Kirsher static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) 1678adfc5217SJeff Kirsher { 1679adfc5217SJeff Kirsher u32 val; 1680adfc5217SJeff Kirsher int limit; 1681adfc5217SJeff Kirsher 1682adfc5217SJeff Kirsher /* make sure mac is disabled */ 1683adfc5217SJeff Kirsher bcm_enet_disable_mac(priv); 1684adfc5217SJeff Kirsher 1685adfc5217SJeff Kirsher /* soft reset mac */ 1686adfc5217SJeff Kirsher val = ENET_CTL_SRESET_MASK; 1687adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1688adfc5217SJeff Kirsher wmb(); 1689adfc5217SJeff Kirsher 1690adfc5217SJeff Kirsher limit = 1000; 1691adfc5217SJeff Kirsher do { 1692adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1693adfc5217SJeff Kirsher if (!(val & ENET_CTL_SRESET_MASK)) 1694adfc5217SJeff Kirsher break; 1695adfc5217SJeff Kirsher udelay(1); 1696adfc5217SJeff Kirsher } while (limit--); 1697adfc5217SJeff Kirsher 1698adfc5217SJeff Kirsher /* select correct mii interface */ 1699adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1700adfc5217SJeff Kirsher if (priv->use_external_mii) 1701adfc5217SJeff Kirsher val |= ENET_CTL_EPHYSEL_MASK; 1702adfc5217SJeff Kirsher else 1703adfc5217SJeff Kirsher val &= ~ENET_CTL_EPHYSEL_MASK; 1704adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1705adfc5217SJeff Kirsher 1706adfc5217SJeff Kirsher /* turn on mdc clock */ 1707adfc5217SJeff Kirsher enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | 1708adfc5217SJeff Kirsher ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); 1709adfc5217SJeff Kirsher 1710adfc5217SJeff Kirsher /* set mib counters to self-clear when read */ 1711adfc5217SJeff Kirsher val = 
enet_readl(priv, ENET_MIBCTL_REG); 1712adfc5217SJeff Kirsher val |= ENET_MIBCTL_RDCLEAR_MASK; 1713adfc5217SJeff Kirsher enet_writel(priv, val, ENET_MIBCTL_REG); 1714adfc5217SJeff Kirsher } 1715adfc5217SJeff Kirsher 1716adfc5217SJeff Kirsher static const struct net_device_ops bcm_enet_ops = { 1717adfc5217SJeff Kirsher .ndo_open = bcm_enet_open, 1718adfc5217SJeff Kirsher .ndo_stop = bcm_enet_stop, 1719adfc5217SJeff Kirsher .ndo_start_xmit = bcm_enet_start_xmit, 1720adfc5217SJeff Kirsher .ndo_set_mac_address = bcm_enet_set_mac_address, 1721afc4b13dSJiri Pirko .ndo_set_rx_mode = bcm_enet_set_multicast_list, 1722adfc5217SJeff Kirsher .ndo_do_ioctl = bcm_enet_ioctl, 1723adfc5217SJeff Kirsher .ndo_change_mtu = bcm_enet_change_mtu, 1724adfc5217SJeff Kirsher }; 1725adfc5217SJeff Kirsher 1726adfc5217SJeff Kirsher /* 1727adfc5217SJeff Kirsher * allocate netdevice, request register memory and register device. 1728adfc5217SJeff Kirsher */ 1729047fc566SBill Pemberton static int bcm_enet_probe(struct platform_device *pdev) 1730adfc5217SJeff Kirsher { 1731adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1732adfc5217SJeff Kirsher struct net_device *dev; 1733adfc5217SJeff Kirsher struct bcm63xx_enet_platform_data *pd; 1734adfc5217SJeff Kirsher struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; 1735adfc5217SJeff Kirsher struct mii_bus *bus; 1736adfc5217SJeff Kirsher const char *clk_name; 1737adfc5217SJeff Kirsher int i, ret; 1738adfc5217SJeff Kirsher 1739adfc5217SJeff Kirsher /* stop if shared driver failed, assume driver->probe will be 1740adfc5217SJeff Kirsher * called in the same order we register devices (correct ?) 
*/ 17410ae99b5fSMaxime Bizon if (!bcm_enet_shared_base[0]) 1742adfc5217SJeff Kirsher return -ENODEV; 1743adfc5217SJeff Kirsher 1744adfc5217SJeff Kirsher res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1745adfc5217SJeff Kirsher res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 1746adfc5217SJeff Kirsher res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); 1747f607e059SJulia Lawall if (!res_irq || !res_irq_rx || !res_irq_tx) 1748adfc5217SJeff Kirsher return -ENODEV; 1749adfc5217SJeff Kirsher 1750adfc5217SJeff Kirsher ret = 0; 1751adfc5217SJeff Kirsher dev = alloc_etherdev(sizeof(*priv)); 1752adfc5217SJeff Kirsher if (!dev) 1753adfc5217SJeff Kirsher return -ENOMEM; 1754adfc5217SJeff Kirsher priv = netdev_priv(dev); 1755adfc5217SJeff Kirsher 17566f00a022SMaxime Bizon priv->enet_is_sw = false; 17576f00a022SMaxime Bizon priv->dma_maxburst = BCMENET_DMA_MAXBURST; 17586f00a022SMaxime Bizon 1759adfc5217SJeff Kirsher ret = compute_hw_mtu(priv, dev->mtu); 1760adfc5217SJeff Kirsher if (ret) 1761adfc5217SJeff Kirsher goto out; 1762adfc5217SJeff Kirsher 1763f607e059SJulia Lawall res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1764f607e059SJulia Lawall priv->base = devm_ioremap_resource(&pdev->dev, res_mem); 1765f607e059SJulia Lawall if (IS_ERR(priv->base)) { 1766f607e059SJulia Lawall ret = PTR_ERR(priv->base); 1767adfc5217SJeff Kirsher goto out; 1768adfc5217SJeff Kirsher } 1769adfc5217SJeff Kirsher 1770adfc5217SJeff Kirsher dev->irq = priv->irq = res_irq->start; 1771adfc5217SJeff Kirsher priv->irq_rx = res_irq_rx->start; 1772adfc5217SJeff Kirsher priv->irq_tx = res_irq_tx->start; 1773adfc5217SJeff Kirsher priv->mac_id = pdev->id; 1774adfc5217SJeff Kirsher 1775adfc5217SJeff Kirsher /* get rx & tx dma channel id for this mac */ 1776adfc5217SJeff Kirsher if (priv->mac_id == 0) { 1777adfc5217SJeff Kirsher priv->rx_chan = 0; 1778adfc5217SJeff Kirsher priv->tx_chan = 1; 1779adfc5217SJeff Kirsher clk_name = "enet0"; 1780adfc5217SJeff Kirsher } else 
{ 1781adfc5217SJeff Kirsher priv->rx_chan = 2; 1782adfc5217SJeff Kirsher priv->tx_chan = 3; 1783adfc5217SJeff Kirsher clk_name = "enet1"; 1784adfc5217SJeff Kirsher } 1785adfc5217SJeff Kirsher 1786adfc5217SJeff Kirsher priv->mac_clk = clk_get(&pdev->dev, clk_name); 1787adfc5217SJeff Kirsher if (IS_ERR(priv->mac_clk)) { 1788adfc5217SJeff Kirsher ret = PTR_ERR(priv->mac_clk); 17891c03da05SJonas Gorski goto out; 1790adfc5217SJeff Kirsher } 1791624e2d21SJonas Gorski clk_prepare_enable(priv->mac_clk); 1792adfc5217SJeff Kirsher 1793adfc5217SJeff Kirsher /* initialize default and fetch platform data */ 1794adfc5217SJeff Kirsher priv->rx_ring_size = BCMENET_DEF_RX_DESC; 1795adfc5217SJeff Kirsher priv->tx_ring_size = BCMENET_DEF_TX_DESC; 1796adfc5217SJeff Kirsher 1797cf0e7794SJingoo Han pd = dev_get_platdata(&pdev->dev); 1798adfc5217SJeff Kirsher if (pd) { 1799adfc5217SJeff Kirsher memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 1800adfc5217SJeff Kirsher priv->has_phy = pd->has_phy; 1801adfc5217SJeff Kirsher priv->phy_id = pd->phy_id; 1802adfc5217SJeff Kirsher priv->has_phy_interrupt = pd->has_phy_interrupt; 1803adfc5217SJeff Kirsher priv->phy_interrupt = pd->phy_interrupt; 1804adfc5217SJeff Kirsher priv->use_external_mii = !pd->use_internal_phy; 1805adfc5217SJeff Kirsher priv->pause_auto = pd->pause_auto; 1806adfc5217SJeff Kirsher priv->pause_rx = pd->pause_rx; 1807adfc5217SJeff Kirsher priv->pause_tx = pd->pause_tx; 1808adfc5217SJeff Kirsher priv->force_duplex_full = pd->force_duplex_full; 1809adfc5217SJeff Kirsher priv->force_speed_100 = pd->force_speed_100; 18103dc6475cSFlorian Fainelli priv->dma_chan_en_mask = pd->dma_chan_en_mask; 18113dc6475cSFlorian Fainelli priv->dma_chan_int_mask = pd->dma_chan_int_mask; 18123dc6475cSFlorian Fainelli priv->dma_chan_width = pd->dma_chan_width; 18133dc6475cSFlorian Fainelli priv->dma_has_sram = pd->dma_has_sram; 18143dc6475cSFlorian Fainelli priv->dma_desc_shift = pd->dma_desc_shift; 1815adfc5217SJeff Kirsher } 1816adfc5217SJeff 
Kirsher 1817adfc5217SJeff Kirsher if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { 1818adfc5217SJeff Kirsher /* using internal PHY, enable clock */ 1819adfc5217SJeff Kirsher priv->phy_clk = clk_get(&pdev->dev, "ephy"); 1820adfc5217SJeff Kirsher if (IS_ERR(priv->phy_clk)) { 1821adfc5217SJeff Kirsher ret = PTR_ERR(priv->phy_clk); 1822adfc5217SJeff Kirsher priv->phy_clk = NULL; 1823adfc5217SJeff Kirsher goto out_put_clk_mac; 1824adfc5217SJeff Kirsher } 1825624e2d21SJonas Gorski clk_prepare_enable(priv->phy_clk); 1826adfc5217SJeff Kirsher } 1827adfc5217SJeff Kirsher 1828adfc5217SJeff Kirsher /* do minimal hardware init to be able to probe mii bus */ 1829adfc5217SJeff Kirsher bcm_enet_hw_preinit(priv); 1830adfc5217SJeff Kirsher 1831adfc5217SJeff Kirsher /* MII bus registration */ 1832adfc5217SJeff Kirsher if (priv->has_phy) { 1833adfc5217SJeff Kirsher 1834adfc5217SJeff Kirsher priv->mii_bus = mdiobus_alloc(); 1835adfc5217SJeff Kirsher if (!priv->mii_bus) { 1836adfc5217SJeff Kirsher ret = -ENOMEM; 1837adfc5217SJeff Kirsher goto out_uninit_hw; 1838adfc5217SJeff Kirsher } 1839adfc5217SJeff Kirsher 1840adfc5217SJeff Kirsher bus = priv->mii_bus; 1841adfc5217SJeff Kirsher bus->name = "bcm63xx_enet MII bus"; 1842adfc5217SJeff Kirsher bus->parent = &pdev->dev; 1843adfc5217SJeff Kirsher bus->priv = priv; 1844adfc5217SJeff Kirsher bus->read = bcm_enet_mdio_read_phylib; 1845adfc5217SJeff Kirsher bus->write = bcm_enet_mdio_write_phylib; 18463e617506SFlorian Fainelli sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id); 1847adfc5217SJeff Kirsher 1848adfc5217SJeff Kirsher /* only probe bus where we think the PHY is, because 1849adfc5217SJeff Kirsher * the mdio read operation return 0 instead of 0xffff 1850adfc5217SJeff Kirsher * if a slave is not present on hw */ 1851adfc5217SJeff Kirsher bus->phy_mask = ~(1 << priv->phy_id); 1852adfc5217SJeff Kirsher 18532a80b5e1SJonas Gorski bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, 18542a80b5e1SJonas Gorski 
GFP_KERNEL); 1855adfc5217SJeff Kirsher if (!bus->irq) { 1856adfc5217SJeff Kirsher ret = -ENOMEM; 1857adfc5217SJeff Kirsher goto out_free_mdio; 1858adfc5217SJeff Kirsher } 1859adfc5217SJeff Kirsher 1860adfc5217SJeff Kirsher if (priv->has_phy_interrupt) 1861adfc5217SJeff Kirsher bus->irq[priv->phy_id] = priv->phy_interrupt; 1862adfc5217SJeff Kirsher else 1863adfc5217SJeff Kirsher bus->irq[priv->phy_id] = PHY_POLL; 1864adfc5217SJeff Kirsher 1865adfc5217SJeff Kirsher ret = mdiobus_register(bus); 1866adfc5217SJeff Kirsher if (ret) { 1867adfc5217SJeff Kirsher dev_err(&pdev->dev, "unable to register mdio bus\n"); 1868adfc5217SJeff Kirsher goto out_free_mdio; 1869adfc5217SJeff Kirsher } 1870adfc5217SJeff Kirsher } else { 1871adfc5217SJeff Kirsher 1872adfc5217SJeff Kirsher /* run platform code to initialize PHY device */ 1873adfc5217SJeff Kirsher if (pd->mii_config && 1874adfc5217SJeff Kirsher pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, 1875adfc5217SJeff Kirsher bcm_enet_mdio_write_mii)) { 1876adfc5217SJeff Kirsher dev_err(&pdev->dev, "unable to configure mdio bus\n"); 1877adfc5217SJeff Kirsher goto out_uninit_hw; 1878adfc5217SJeff Kirsher } 1879adfc5217SJeff Kirsher } 1880adfc5217SJeff Kirsher 1881adfc5217SJeff Kirsher spin_lock_init(&priv->rx_lock); 1882adfc5217SJeff Kirsher 1883adfc5217SJeff Kirsher /* init rx timeout (used for oom) */ 1884adfc5217SJeff Kirsher init_timer(&priv->rx_timeout); 1885adfc5217SJeff Kirsher priv->rx_timeout.function = bcm_enet_refill_rx_timer; 1886adfc5217SJeff Kirsher priv->rx_timeout.data = (unsigned long)dev; 1887adfc5217SJeff Kirsher 1888adfc5217SJeff Kirsher /* init the mib update lock&work */ 1889adfc5217SJeff Kirsher mutex_init(&priv->mib_update_lock); 1890adfc5217SJeff Kirsher INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); 1891adfc5217SJeff Kirsher 1892adfc5217SJeff Kirsher /* zero mib counters */ 1893adfc5217SJeff Kirsher for (i = 0; i < ENET_MIB_REG_COUNT; i++) 1894adfc5217SJeff Kirsher enet_writel(priv, 0, 
ENET_MIB_REG(i)); 1895adfc5217SJeff Kirsher 1896adfc5217SJeff Kirsher /* register netdevice */ 1897adfc5217SJeff Kirsher dev->netdev_ops = &bcm_enet_ops; 1898adfc5217SJeff Kirsher netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 1899adfc5217SJeff Kirsher 19007ad24ea4SWilfried Klaebe dev->ethtool_ops = &bcm_enet_ethtool_ops; 1901adfc5217SJeff Kirsher SET_NETDEV_DEV(dev, &pdev->dev); 1902adfc5217SJeff Kirsher 1903adfc5217SJeff Kirsher ret = register_netdev(dev); 1904adfc5217SJeff Kirsher if (ret) 1905adfc5217SJeff Kirsher goto out_unregister_mdio; 1906adfc5217SJeff Kirsher 1907adfc5217SJeff Kirsher netif_carrier_off(dev); 1908adfc5217SJeff Kirsher platform_set_drvdata(pdev, dev); 1909adfc5217SJeff Kirsher priv->pdev = pdev; 1910adfc5217SJeff Kirsher priv->net_dev = dev; 1911adfc5217SJeff Kirsher 1912adfc5217SJeff Kirsher return 0; 1913adfc5217SJeff Kirsher 1914adfc5217SJeff Kirsher out_unregister_mdio: 19152a80b5e1SJonas Gorski if (priv->mii_bus) 1916adfc5217SJeff Kirsher mdiobus_unregister(priv->mii_bus); 1917adfc5217SJeff Kirsher 1918adfc5217SJeff Kirsher out_free_mdio: 1919adfc5217SJeff Kirsher if (priv->mii_bus) 1920adfc5217SJeff Kirsher mdiobus_free(priv->mii_bus); 1921adfc5217SJeff Kirsher 1922adfc5217SJeff Kirsher out_uninit_hw: 1923adfc5217SJeff Kirsher /* turn off mdc clock */ 1924adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_MIISC_REG); 1925adfc5217SJeff Kirsher if (priv->phy_clk) { 1926624e2d21SJonas Gorski clk_disable_unprepare(priv->phy_clk); 1927adfc5217SJeff Kirsher clk_put(priv->phy_clk); 1928adfc5217SJeff Kirsher } 1929adfc5217SJeff Kirsher 1930adfc5217SJeff Kirsher out_put_clk_mac: 1931624e2d21SJonas Gorski clk_disable_unprepare(priv->mac_clk); 1932adfc5217SJeff Kirsher clk_put(priv->mac_clk); 1933adfc5217SJeff Kirsher out: 1934adfc5217SJeff Kirsher free_netdev(dev); 1935adfc5217SJeff Kirsher return ret; 1936adfc5217SJeff Kirsher } 1937adfc5217SJeff Kirsher 1938adfc5217SJeff Kirsher 1939adfc5217SJeff Kirsher /* 1940adfc5217SJeff Kirsher * exit 
func, stops hardware and unregisters netdevice 1941adfc5217SJeff Kirsher */ 1942047fc566SBill Pemberton static int bcm_enet_remove(struct platform_device *pdev) 1943adfc5217SJeff Kirsher { 1944adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1945adfc5217SJeff Kirsher struct net_device *dev; 1946adfc5217SJeff Kirsher 1947adfc5217SJeff Kirsher /* stop netdevice */ 1948adfc5217SJeff Kirsher dev = platform_get_drvdata(pdev); 1949adfc5217SJeff Kirsher priv = netdev_priv(dev); 1950adfc5217SJeff Kirsher unregister_netdev(dev); 1951adfc5217SJeff Kirsher 1952adfc5217SJeff Kirsher /* turn off mdc clock */ 1953adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_MIISC_REG); 1954adfc5217SJeff Kirsher 1955adfc5217SJeff Kirsher if (priv->has_phy) { 1956adfc5217SJeff Kirsher mdiobus_unregister(priv->mii_bus); 1957adfc5217SJeff Kirsher mdiobus_free(priv->mii_bus); 1958adfc5217SJeff Kirsher } else { 1959adfc5217SJeff Kirsher struct bcm63xx_enet_platform_data *pd; 1960adfc5217SJeff Kirsher 1961cf0e7794SJingoo Han pd = dev_get_platdata(&pdev->dev); 1962adfc5217SJeff Kirsher if (pd && pd->mii_config) 1963adfc5217SJeff Kirsher pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, 1964adfc5217SJeff Kirsher bcm_enet_mdio_write_mii); 1965adfc5217SJeff Kirsher } 1966adfc5217SJeff Kirsher 1967adfc5217SJeff Kirsher /* disable hw block clocks */ 1968adfc5217SJeff Kirsher if (priv->phy_clk) { 1969624e2d21SJonas Gorski clk_disable_unprepare(priv->phy_clk); 1970adfc5217SJeff Kirsher clk_put(priv->phy_clk); 1971adfc5217SJeff Kirsher } 1972624e2d21SJonas Gorski clk_disable_unprepare(priv->mac_clk); 1973adfc5217SJeff Kirsher clk_put(priv->mac_clk); 1974adfc5217SJeff Kirsher 1975adfc5217SJeff Kirsher free_netdev(dev); 1976adfc5217SJeff Kirsher return 0; 1977adfc5217SJeff Kirsher } 1978adfc5217SJeff Kirsher 1979adfc5217SJeff Kirsher struct platform_driver bcm63xx_enet_driver = { 1980adfc5217SJeff Kirsher .probe = bcm_enet_probe, 1981047fc566SBill Pemberton .remove = bcm_enet_remove, 1982adfc5217SJeff Kirsher 
.driver = { 1983adfc5217SJeff Kirsher .name = "bcm63xx_enet", 1984adfc5217SJeff Kirsher .owner = THIS_MODULE, 1985adfc5217SJeff Kirsher }, 1986adfc5217SJeff Kirsher }; 1987adfc5217SJeff Kirsher 1988adfc5217SJeff Kirsher /* 19896f00a022SMaxime Bizon * switch mii access callbacks 1990adfc5217SJeff Kirsher */ 19916f00a022SMaxime Bizon static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv, 19926f00a022SMaxime Bizon int ext, int phy_id, int location) 19936f00a022SMaxime Bizon { 19946f00a022SMaxime Bizon u32 reg; 19956f00a022SMaxime Bizon int ret; 19966f00a022SMaxime Bizon 19976f00a022SMaxime Bizon spin_lock_bh(&priv->enetsw_mdio_lock); 19986f00a022SMaxime Bizon enetsw_writel(priv, 0, ENETSW_MDIOC_REG); 19996f00a022SMaxime Bizon 20006f00a022SMaxime Bizon reg = ENETSW_MDIOC_RD_MASK | 20016f00a022SMaxime Bizon (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | 20026f00a022SMaxime Bizon (location << ENETSW_MDIOC_REG_SHIFT); 20036f00a022SMaxime Bizon 20046f00a022SMaxime Bizon if (ext) 20056f00a022SMaxime Bizon reg |= ENETSW_MDIOC_EXT_MASK; 20066f00a022SMaxime Bizon 20076f00a022SMaxime Bizon enetsw_writel(priv, reg, ENETSW_MDIOC_REG); 20086f00a022SMaxime Bizon udelay(50); 20096f00a022SMaxime Bizon ret = enetsw_readw(priv, ENETSW_MDIOD_REG); 20106f00a022SMaxime Bizon spin_unlock_bh(&priv->enetsw_mdio_lock); 20116f00a022SMaxime Bizon return ret; 20126f00a022SMaxime Bizon } 20136f00a022SMaxime Bizon 20146f00a022SMaxime Bizon static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv, 20156f00a022SMaxime Bizon int ext, int phy_id, int location, 20166f00a022SMaxime Bizon uint16_t data) 20176f00a022SMaxime Bizon { 20186f00a022SMaxime Bizon u32 reg; 20196f00a022SMaxime Bizon 20206f00a022SMaxime Bizon spin_lock_bh(&priv->enetsw_mdio_lock); 20216f00a022SMaxime Bizon enetsw_writel(priv, 0, ENETSW_MDIOC_REG); 20226f00a022SMaxime Bizon 20236f00a022SMaxime Bizon reg = ENETSW_MDIOC_WR_MASK | 20246f00a022SMaxime Bizon (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | 20256f00a022SMaxime Bizon (location << 
ENETSW_MDIOC_REG_SHIFT); 20266f00a022SMaxime Bizon 20276f00a022SMaxime Bizon if (ext) 20286f00a022SMaxime Bizon reg |= ENETSW_MDIOC_EXT_MASK; 20296f00a022SMaxime Bizon 20306f00a022SMaxime Bizon reg |= data; 20316f00a022SMaxime Bizon 20326f00a022SMaxime Bizon enetsw_writel(priv, reg, ENETSW_MDIOC_REG); 20336f00a022SMaxime Bizon udelay(50); 20346f00a022SMaxime Bizon spin_unlock_bh(&priv->enetsw_mdio_lock); 20356f00a022SMaxime Bizon } 20366f00a022SMaxime Bizon 20376f00a022SMaxime Bizon static inline int bcm_enet_port_is_rgmii(int portid) 20386f00a022SMaxime Bizon { 20396f00a022SMaxime Bizon return portid >= ENETSW_RGMII_PORT0; 20406f00a022SMaxime Bizon } 20416f00a022SMaxime Bizon 20426f00a022SMaxime Bizon /* 20436f00a022SMaxime Bizon * enet sw PHY polling 20446f00a022SMaxime Bizon */ 20456f00a022SMaxime Bizon static void swphy_poll_timer(unsigned long data) 20466f00a022SMaxime Bizon { 20476f00a022SMaxime Bizon struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data; 20486f00a022SMaxime Bizon unsigned int i; 20496f00a022SMaxime Bizon 20506f00a022SMaxime Bizon for (i = 0; i < priv->num_ports; i++) { 20516f00a022SMaxime Bizon struct bcm63xx_enetsw_port *port; 2052aebd9947SSimon Arlott int val, j, up, advertise, lpa, speed, duplex, media; 20536f00a022SMaxime Bizon int external_phy = bcm_enet_port_is_rgmii(i); 20546f00a022SMaxime Bizon u8 override; 20556f00a022SMaxime Bizon 20566f00a022SMaxime Bizon port = &priv->used_ports[i]; 20576f00a022SMaxime Bizon if (!port->used) 20586f00a022SMaxime Bizon continue; 20596f00a022SMaxime Bizon 20606f00a022SMaxime Bizon if (port->bypass_link) 20616f00a022SMaxime Bizon continue; 20626f00a022SMaxime Bizon 20636f00a022SMaxime Bizon /* dummy read to clear */ 20646f00a022SMaxime Bizon for (j = 0; j < 2; j++) 20656f00a022SMaxime Bizon val = bcmenet_sw_mdio_read(priv, external_phy, 20666f00a022SMaxime Bizon port->phy_id, MII_BMSR); 20676f00a022SMaxime Bizon 20686f00a022SMaxime Bizon if (val == 0xffff) 20696f00a022SMaxime Bizon continue; 
20706f00a022SMaxime Bizon 20716f00a022SMaxime Bizon up = (val & BMSR_LSTATUS) ? 1 : 0; 20726f00a022SMaxime Bizon if (!(up ^ priv->sw_port_link[i])) 20736f00a022SMaxime Bizon continue; 20746f00a022SMaxime Bizon 20756f00a022SMaxime Bizon priv->sw_port_link[i] = up; 20766f00a022SMaxime Bizon 20776f00a022SMaxime Bizon /* link changed */ 20786f00a022SMaxime Bizon if (!up) { 20796f00a022SMaxime Bizon dev_info(&priv->pdev->dev, "link DOWN on %s\n", 20806f00a022SMaxime Bizon port->name); 20816f00a022SMaxime Bizon enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, 20826f00a022SMaxime Bizon ENETSW_PORTOV_REG(i)); 20836f00a022SMaxime Bizon enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | 20846f00a022SMaxime Bizon ENETSW_PTCTRL_TXDIS_MASK, 20856f00a022SMaxime Bizon ENETSW_PTCTRL_REG(i)); 20866f00a022SMaxime Bizon continue; 20876f00a022SMaxime Bizon } 20886f00a022SMaxime Bizon 20896f00a022SMaxime Bizon advertise = bcmenet_sw_mdio_read(priv, external_phy, 20906f00a022SMaxime Bizon port->phy_id, MII_ADVERTISE); 20916f00a022SMaxime Bizon 20926f00a022SMaxime Bizon lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, 20936f00a022SMaxime Bizon MII_LPA); 20946f00a022SMaxime Bizon 20956f00a022SMaxime Bizon /* figure out media and duplex from advertise and LPA values */ 20966f00a022SMaxime Bizon media = mii_nway_result(lpa & advertise); 20976f00a022SMaxime Bizon duplex = (media & ADVERTISE_FULL) ? 
1 : 0; 20986f00a022SMaxime Bizon 20996f00a022SMaxime Bizon if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) 21006f00a022SMaxime Bizon speed = 100; 21016f00a022SMaxime Bizon else 21026f00a022SMaxime Bizon speed = 10; 2103aebd9947SSimon Arlott 2104aebd9947SSimon Arlott if (val & BMSR_ESTATEN) { 2105aebd9947SSimon Arlott advertise = bcmenet_sw_mdio_read(priv, external_phy, 2106aebd9947SSimon Arlott port->phy_id, MII_CTRL1000); 2107aebd9947SSimon Arlott 2108aebd9947SSimon Arlott lpa = bcmenet_sw_mdio_read(priv, external_phy, 2109aebd9947SSimon Arlott port->phy_id, MII_STAT1000); 2110aebd9947SSimon Arlott 2111aebd9947SSimon Arlott if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF) 2112aebd9947SSimon Arlott && lpa & (LPA_1000FULL | LPA_1000HALF)) { 2113aebd9947SSimon Arlott speed = 1000; 2114aebd9947SSimon Arlott duplex = (lpa & LPA_1000FULL); 2115aebd9947SSimon Arlott } 21166f00a022SMaxime Bizon } 21176f00a022SMaxime Bizon 21186f00a022SMaxime Bizon dev_info(&priv->pdev->dev, 21196f00a022SMaxime Bizon "link UP on %s, %dMbps, %s-duplex\n", 21206f00a022SMaxime Bizon port->name, speed, duplex ? 
"full" : "half"); 21216f00a022SMaxime Bizon 21226f00a022SMaxime Bizon override = ENETSW_PORTOV_ENABLE_MASK | 21236f00a022SMaxime Bizon ENETSW_PORTOV_LINKUP_MASK; 21246f00a022SMaxime Bizon 21256f00a022SMaxime Bizon if (speed == 1000) 21266f00a022SMaxime Bizon override |= ENETSW_IMPOV_1000_MASK; 21276f00a022SMaxime Bizon else if (speed == 100) 21286f00a022SMaxime Bizon override |= ENETSW_IMPOV_100_MASK; 21296f00a022SMaxime Bizon if (duplex) 21306f00a022SMaxime Bizon override |= ENETSW_IMPOV_FDX_MASK; 21316f00a022SMaxime Bizon 21326f00a022SMaxime Bizon enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); 21336f00a022SMaxime Bizon enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); 21346f00a022SMaxime Bizon } 21356f00a022SMaxime Bizon 21366f00a022SMaxime Bizon priv->swphy_poll.expires = jiffies + HZ; 21376f00a022SMaxime Bizon add_timer(&priv->swphy_poll); 21386f00a022SMaxime Bizon } 21396f00a022SMaxime Bizon 21406f00a022SMaxime Bizon /* 21416f00a022SMaxime Bizon * open callback, allocate dma rings & buffers and start rx operation 21426f00a022SMaxime Bizon */ 21436f00a022SMaxime Bizon static int bcm_enetsw_open(struct net_device *dev) 21446f00a022SMaxime Bizon { 21456f00a022SMaxime Bizon struct bcm_enet_priv *priv; 21466f00a022SMaxime Bizon struct device *kdev; 21476f00a022SMaxime Bizon int i, ret; 21486f00a022SMaxime Bizon unsigned int size; 21496f00a022SMaxime Bizon void *p; 21506f00a022SMaxime Bizon u32 val; 21516f00a022SMaxime Bizon 21526f00a022SMaxime Bizon priv = netdev_priv(dev); 21536f00a022SMaxime Bizon kdev = &priv->pdev->dev; 21546f00a022SMaxime Bizon 21556f00a022SMaxime Bizon /* mask all interrupts and request them */ 21563dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 21573dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 21586f00a022SMaxime Bizon 21596f00a022SMaxime Bizon ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 2160df9f1b9fSMichael Opdenacker 0, dev->name, dev); 21616f00a022SMaxime 
Bizon if (ret) 21626f00a022SMaxime Bizon goto out_freeirq; 21636f00a022SMaxime Bizon 21646f00a022SMaxime Bizon if (priv->irq_tx != -1) { 21656f00a022SMaxime Bizon ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 2166df9f1b9fSMichael Opdenacker 0, dev->name, dev); 21676f00a022SMaxime Bizon if (ret) 21686f00a022SMaxime Bizon goto out_freeirq_rx; 21696f00a022SMaxime Bizon } 21706f00a022SMaxime Bizon 21716f00a022SMaxime Bizon /* allocate rx dma ring */ 21726f00a022SMaxime Bizon size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 21736f00a022SMaxime Bizon p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 21746f00a022SMaxime Bizon if (!p) { 21756f00a022SMaxime Bizon dev_err(kdev, "cannot allocate rx ring %u\n", size); 21766f00a022SMaxime Bizon ret = -ENOMEM; 21776f00a022SMaxime Bizon goto out_freeirq_tx; 21786f00a022SMaxime Bizon } 21796f00a022SMaxime Bizon 21806f00a022SMaxime Bizon memset(p, 0, size); 21816f00a022SMaxime Bizon priv->rx_desc_alloc_size = size; 21826f00a022SMaxime Bizon priv->rx_desc_cpu = p; 21836f00a022SMaxime Bizon 21846f00a022SMaxime Bizon /* allocate tx dma ring */ 21856f00a022SMaxime Bizon size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 21866f00a022SMaxime Bizon p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 21876f00a022SMaxime Bizon if (!p) { 21886f00a022SMaxime Bizon dev_err(kdev, "cannot allocate tx ring\n"); 21896f00a022SMaxime Bizon ret = -ENOMEM; 21906f00a022SMaxime Bizon goto out_free_rx_ring; 21916f00a022SMaxime Bizon } 21926f00a022SMaxime Bizon 21936f00a022SMaxime Bizon memset(p, 0, size); 21946f00a022SMaxime Bizon priv->tx_desc_alloc_size = size; 21956f00a022SMaxime Bizon priv->tx_desc_cpu = p; 21966f00a022SMaxime Bizon 21976f00a022SMaxime Bizon priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, 21986f00a022SMaxime Bizon GFP_KERNEL); 21996f00a022SMaxime Bizon if (!priv->tx_skb) { 22006f00a022SMaxime Bizon dev_err(kdev, "cannot allocate rx skb queue\n"); 
22016f00a022SMaxime Bizon ret = -ENOMEM; 22026f00a022SMaxime Bizon goto out_free_tx_ring; 22036f00a022SMaxime Bizon } 22046f00a022SMaxime Bizon 22056f00a022SMaxime Bizon priv->tx_desc_count = priv->tx_ring_size; 22066f00a022SMaxime Bizon priv->tx_dirty_desc = 0; 22076f00a022SMaxime Bizon priv->tx_curr_desc = 0; 22086f00a022SMaxime Bizon spin_lock_init(&priv->tx_lock); 22096f00a022SMaxime Bizon 22106f00a022SMaxime Bizon /* init & fill rx ring with skbs */ 22116f00a022SMaxime Bizon priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, 22126f00a022SMaxime Bizon GFP_KERNEL); 22136f00a022SMaxime Bizon if (!priv->rx_skb) { 22146f00a022SMaxime Bizon dev_err(kdev, "cannot allocate rx skb queue\n"); 22156f00a022SMaxime Bizon ret = -ENOMEM; 22166f00a022SMaxime Bizon goto out_free_tx_skb; 22176f00a022SMaxime Bizon } 22186f00a022SMaxime Bizon 22196f00a022SMaxime Bizon priv->rx_desc_count = 0; 22206f00a022SMaxime Bizon priv->rx_dirty_desc = 0; 22216f00a022SMaxime Bizon priv->rx_curr_desc = 0; 22226f00a022SMaxime Bizon 22236f00a022SMaxime Bizon /* disable all ports */ 22246f00a022SMaxime Bizon for (i = 0; i < priv->num_ports; i++) { 22256f00a022SMaxime Bizon enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, 22266f00a022SMaxime Bizon ENETSW_PORTOV_REG(i)); 22276f00a022SMaxime Bizon enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | 22286f00a022SMaxime Bizon ENETSW_PTCTRL_TXDIS_MASK, 22296f00a022SMaxime Bizon ENETSW_PTCTRL_REG(i)); 22306f00a022SMaxime Bizon 22316f00a022SMaxime Bizon priv->sw_port_link[i] = 0; 22326f00a022SMaxime Bizon } 22336f00a022SMaxime Bizon 22346f00a022SMaxime Bizon /* reset mib */ 22356f00a022SMaxime Bizon val = enetsw_readb(priv, ENETSW_GMCR_REG); 22366f00a022SMaxime Bizon val |= ENETSW_GMCR_RST_MIB_MASK; 22376f00a022SMaxime Bizon enetsw_writeb(priv, val, ENETSW_GMCR_REG); 22386f00a022SMaxime Bizon mdelay(1); 22396f00a022SMaxime Bizon val &= ~ENETSW_GMCR_RST_MIB_MASK; 22406f00a022SMaxime Bizon enetsw_writeb(priv, val, ENETSW_GMCR_REG); 
22416f00a022SMaxime Bizon mdelay(1); 22426f00a022SMaxime Bizon 22436f00a022SMaxime Bizon /* force CPU port state */ 22446f00a022SMaxime Bizon val = enetsw_readb(priv, ENETSW_IMPOV_REG); 22456f00a022SMaxime Bizon val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK; 22466f00a022SMaxime Bizon enetsw_writeb(priv, val, ENETSW_IMPOV_REG); 22476f00a022SMaxime Bizon 22486f00a022SMaxime Bizon /* enable switch forward engine */ 22496f00a022SMaxime Bizon val = enetsw_readb(priv, ENETSW_SWMODE_REG); 22506f00a022SMaxime Bizon val |= ENETSW_SWMODE_FWD_EN_MASK; 22516f00a022SMaxime Bizon enetsw_writeb(priv, val, ENETSW_SWMODE_REG); 22526f00a022SMaxime Bizon 22536f00a022SMaxime Bizon /* enable jumbo on all ports */ 22546f00a022SMaxime Bizon enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG); 22556f00a022SMaxime Bizon enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG); 22566f00a022SMaxime Bizon 22576f00a022SMaxime Bizon /* initialize flow control buffer allocation */ 22586f00a022SMaxime Bizon enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 22596f00a022SMaxime Bizon ENETDMA_BUFALLOC_REG(priv->rx_chan)); 22606f00a022SMaxime Bizon 22616f00a022SMaxime Bizon if (bcm_enet_refill_rx(dev)) { 22626f00a022SMaxime Bizon dev_err(kdev, "cannot allocate rx skb queue\n"); 22636f00a022SMaxime Bizon ret = -ENOMEM; 22646f00a022SMaxime Bizon goto out; 22656f00a022SMaxime Bizon } 22666f00a022SMaxime Bizon 22676f00a022SMaxime Bizon /* write rx & tx ring addresses */ 22686f00a022SMaxime Bizon enet_dmas_writel(priv, priv->rx_desc_dma, 22693dc6475cSFlorian Fainelli ENETDMAS_RSTART_REG, priv->rx_chan); 22706f00a022SMaxime Bizon enet_dmas_writel(priv, priv->tx_desc_dma, 22713dc6475cSFlorian Fainelli ENETDMAS_RSTART_REG, priv->tx_chan); 22726f00a022SMaxime Bizon 22736f00a022SMaxime Bizon /* clear remaining state ram for rx & tx channel */ 22743dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); 22753dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, 
ENETDMAS_SRAM2_REG, priv->tx_chan); 22763dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); 22773dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); 22783dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); 22793dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); 22806f00a022SMaxime Bizon 22816f00a022SMaxime Bizon /* set dma maximum burst len */ 22826f00a022SMaxime Bizon enet_dmac_writel(priv, priv->dma_maxburst, 22833dc6475cSFlorian Fainelli ENETDMAC_MAXBURST, priv->rx_chan); 22846f00a022SMaxime Bizon enet_dmac_writel(priv, priv->dma_maxburst, 22853dc6475cSFlorian Fainelli ENETDMAC_MAXBURST, priv->tx_chan); 22866f00a022SMaxime Bizon 22876f00a022SMaxime Bizon /* set flow control low/high threshold to 1/3 / 2/3 */ 22886f00a022SMaxime Bizon val = priv->rx_ring_size / 3; 22896f00a022SMaxime Bizon enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); 22906f00a022SMaxime Bizon val = (priv->rx_ring_size * 2) / 3; 22916f00a022SMaxime Bizon enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); 22926f00a022SMaxime Bizon 22936f00a022SMaxime Bizon /* all set, enable mac and interrupts, start dma engine and 22946f00a022SMaxime Bizon * kick rx dma channel 22956f00a022SMaxime Bizon */ 22966f00a022SMaxime Bizon wmb(); 22976f00a022SMaxime Bizon enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 22986f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, 22993dc6475cSFlorian Fainelli ENETDMAC_CHANCFG, priv->rx_chan); 23006f00a022SMaxime Bizon 23016f00a022SMaxime Bizon /* watch "packet transferred" interrupt in rx and tx */ 23026f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 23033dc6475cSFlorian Fainelli ENETDMAC_IR, priv->rx_chan); 23046f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 23053dc6475cSFlorian Fainelli ENETDMAC_IR, priv->tx_chan); 
23066f00a022SMaxime Bizon 23076f00a022SMaxime Bizon /* make sure we enable napi before rx interrupt */ 23086f00a022SMaxime Bizon napi_enable(&priv->napi); 23096f00a022SMaxime Bizon 23106f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 23113dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->rx_chan); 23126f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 23133dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->tx_chan); 23146f00a022SMaxime Bizon 23156f00a022SMaxime Bizon netif_carrier_on(dev); 23166f00a022SMaxime Bizon netif_start_queue(dev); 23176f00a022SMaxime Bizon 23186f00a022SMaxime Bizon /* apply override config for bypass_link ports here. */ 23196f00a022SMaxime Bizon for (i = 0; i < priv->num_ports; i++) { 23206f00a022SMaxime Bizon struct bcm63xx_enetsw_port *port; 23216f00a022SMaxime Bizon u8 override; 23226f00a022SMaxime Bizon port = &priv->used_ports[i]; 23236f00a022SMaxime Bizon if (!port->used) 23246f00a022SMaxime Bizon continue; 23256f00a022SMaxime Bizon 23266f00a022SMaxime Bizon if (!port->bypass_link) 23276f00a022SMaxime Bizon continue; 23286f00a022SMaxime Bizon 23296f00a022SMaxime Bizon override = ENETSW_PORTOV_ENABLE_MASK | 23306f00a022SMaxime Bizon ENETSW_PORTOV_LINKUP_MASK; 23316f00a022SMaxime Bizon 23326f00a022SMaxime Bizon switch (port->force_speed) { 23336f00a022SMaxime Bizon case 1000: 23346f00a022SMaxime Bizon override |= ENETSW_IMPOV_1000_MASK; 23356f00a022SMaxime Bizon break; 23366f00a022SMaxime Bizon case 100: 23376f00a022SMaxime Bizon override |= ENETSW_IMPOV_100_MASK; 23386f00a022SMaxime Bizon break; 23396f00a022SMaxime Bizon case 10: 23406f00a022SMaxime Bizon break; 23416f00a022SMaxime Bizon default: 23426f00a022SMaxime Bizon pr_warn("invalid forced speed on port %s: assume 10\n", 23436f00a022SMaxime Bizon port->name); 23446f00a022SMaxime Bizon break; 23456f00a022SMaxime Bizon } 23466f00a022SMaxime Bizon 23476f00a022SMaxime Bizon if (port->force_duplex_full) 23486f00a022SMaxime Bizon override |= 
ENETSW_IMPOV_FDX_MASK; 23496f00a022SMaxime Bizon 23506f00a022SMaxime Bizon 23516f00a022SMaxime Bizon enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); 23526f00a022SMaxime Bizon enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); 23536f00a022SMaxime Bizon } 23546f00a022SMaxime Bizon 23556f00a022SMaxime Bizon /* start phy polling timer */ 23566f00a022SMaxime Bizon init_timer(&priv->swphy_poll); 23576f00a022SMaxime Bizon priv->swphy_poll.function = swphy_poll_timer; 23586f00a022SMaxime Bizon priv->swphy_poll.data = (unsigned long)priv; 23596f00a022SMaxime Bizon priv->swphy_poll.expires = jiffies; 23606f00a022SMaxime Bizon add_timer(&priv->swphy_poll); 23616f00a022SMaxime Bizon return 0; 23626f00a022SMaxime Bizon 23636f00a022SMaxime Bizon out: 23646f00a022SMaxime Bizon for (i = 0; i < priv->rx_ring_size; i++) { 23656f00a022SMaxime Bizon struct bcm_enet_desc *desc; 23666f00a022SMaxime Bizon 23676f00a022SMaxime Bizon if (!priv->rx_skb[i]) 23686f00a022SMaxime Bizon continue; 23696f00a022SMaxime Bizon 23706f00a022SMaxime Bizon desc = &priv->rx_desc_cpu[i]; 23716f00a022SMaxime Bizon dma_unmap_single(kdev, desc->address, priv->rx_skb_size, 23726f00a022SMaxime Bizon DMA_FROM_DEVICE); 23736f00a022SMaxime Bizon kfree_skb(priv->rx_skb[i]); 23746f00a022SMaxime Bizon } 23756f00a022SMaxime Bizon kfree(priv->rx_skb); 23766f00a022SMaxime Bizon 23776f00a022SMaxime Bizon out_free_tx_skb: 23786f00a022SMaxime Bizon kfree(priv->tx_skb); 23796f00a022SMaxime Bizon 23806f00a022SMaxime Bizon out_free_tx_ring: 23816f00a022SMaxime Bizon dma_free_coherent(kdev, priv->tx_desc_alloc_size, 23826f00a022SMaxime Bizon priv->tx_desc_cpu, priv->tx_desc_dma); 23836f00a022SMaxime Bizon 23846f00a022SMaxime Bizon out_free_rx_ring: 23856f00a022SMaxime Bizon dma_free_coherent(kdev, priv->rx_desc_alloc_size, 23866f00a022SMaxime Bizon priv->rx_desc_cpu, priv->rx_desc_dma); 23876f00a022SMaxime Bizon 23886f00a022SMaxime Bizon out_freeirq_tx: 23896f00a022SMaxime Bizon if (priv->irq_tx != -1) 23906f00a022SMaxime Bizon 
free_irq(priv->irq_tx, dev); 23916f00a022SMaxime Bizon 23926f00a022SMaxime Bizon out_freeirq_rx: 23936f00a022SMaxime Bizon free_irq(priv->irq_rx, dev); 23946f00a022SMaxime Bizon 23956f00a022SMaxime Bizon out_freeirq: 23966f00a022SMaxime Bizon return ret; 23976f00a022SMaxime Bizon } 23986f00a022SMaxime Bizon 23996f00a022SMaxime Bizon /* stop callback */ 24006f00a022SMaxime Bizon static int bcm_enetsw_stop(struct net_device *dev) 24016f00a022SMaxime Bizon { 24026f00a022SMaxime Bizon struct bcm_enet_priv *priv; 24036f00a022SMaxime Bizon struct device *kdev; 24046f00a022SMaxime Bizon int i; 24056f00a022SMaxime Bizon 24066f00a022SMaxime Bizon priv = netdev_priv(dev); 24076f00a022SMaxime Bizon kdev = &priv->pdev->dev; 24086f00a022SMaxime Bizon 24096f00a022SMaxime Bizon del_timer_sync(&priv->swphy_poll); 24106f00a022SMaxime Bizon netif_stop_queue(dev); 24116f00a022SMaxime Bizon napi_disable(&priv->napi); 24126f00a022SMaxime Bizon del_timer_sync(&priv->rx_timeout); 24136f00a022SMaxime Bizon 24146f00a022SMaxime Bizon /* mask all interrupts */ 24153dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 24163dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 24176f00a022SMaxime Bizon 24186f00a022SMaxime Bizon /* disable dma & mac */ 24196f00a022SMaxime Bizon bcm_enet_disable_dma(priv, priv->tx_chan); 24206f00a022SMaxime Bizon bcm_enet_disable_dma(priv, priv->rx_chan); 24216f00a022SMaxime Bizon 24226f00a022SMaxime Bizon /* force reclaim of all tx buffers */ 24236f00a022SMaxime Bizon bcm_enet_tx_reclaim(dev, 1); 24246f00a022SMaxime Bizon 24256f00a022SMaxime Bizon /* free the rx skb ring */ 24266f00a022SMaxime Bizon for (i = 0; i < priv->rx_ring_size; i++) { 24276f00a022SMaxime Bizon struct bcm_enet_desc *desc; 24286f00a022SMaxime Bizon 24296f00a022SMaxime Bizon if (!priv->rx_skb[i]) 24306f00a022SMaxime Bizon continue; 24316f00a022SMaxime Bizon 24326f00a022SMaxime Bizon desc = &priv->rx_desc_cpu[i]; 
24336f00a022SMaxime Bizon dma_unmap_single(kdev, desc->address, priv->rx_skb_size, 24346f00a022SMaxime Bizon DMA_FROM_DEVICE); 24356f00a022SMaxime Bizon kfree_skb(priv->rx_skb[i]); 24366f00a022SMaxime Bizon } 24376f00a022SMaxime Bizon 24386f00a022SMaxime Bizon /* free remaining allocated memory */ 24396f00a022SMaxime Bizon kfree(priv->rx_skb); 24406f00a022SMaxime Bizon kfree(priv->tx_skb); 24416f00a022SMaxime Bizon dma_free_coherent(kdev, priv->rx_desc_alloc_size, 24426f00a022SMaxime Bizon priv->rx_desc_cpu, priv->rx_desc_dma); 24436f00a022SMaxime Bizon dma_free_coherent(kdev, priv->tx_desc_alloc_size, 24446f00a022SMaxime Bizon priv->tx_desc_cpu, priv->tx_desc_dma); 24456f00a022SMaxime Bizon if (priv->irq_tx != -1) 24466f00a022SMaxime Bizon free_irq(priv->irq_tx, dev); 24476f00a022SMaxime Bizon free_irq(priv->irq_rx, dev); 24486f00a022SMaxime Bizon 24496f00a022SMaxime Bizon return 0; 24506f00a022SMaxime Bizon } 24516f00a022SMaxime Bizon 24526f00a022SMaxime Bizon /* try to sort out phy external status by walking the used_port field 24536f00a022SMaxime Bizon * in the bcm_enet_priv structure. in case the phy address is not 24546f00a022SMaxime Bizon * assigned to any physical port on the switch, assume it is external 24556f00a022SMaxime Bizon * (and yell at the user). 
24566f00a022SMaxime Bizon */ 24576f00a022SMaxime Bizon static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id) 24586f00a022SMaxime Bizon { 24596f00a022SMaxime Bizon int i; 24606f00a022SMaxime Bizon 24616f00a022SMaxime Bizon for (i = 0; i < priv->num_ports; ++i) { 24626f00a022SMaxime Bizon if (!priv->used_ports[i].used) 24636f00a022SMaxime Bizon continue; 24646f00a022SMaxime Bizon if (priv->used_ports[i].phy_id == phy_id) 24656f00a022SMaxime Bizon return bcm_enet_port_is_rgmii(i); 24666f00a022SMaxime Bizon } 24676f00a022SMaxime Bizon 24686f00a022SMaxime Bizon printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n", 24696f00a022SMaxime Bizon phy_id); 24706f00a022SMaxime Bizon return 1; 24716f00a022SMaxime Bizon } 24726f00a022SMaxime Bizon 24736f00a022SMaxime Bizon /* can't use bcmenet_sw_mdio_read directly as we need to sort out 24746f00a022SMaxime Bizon * external/internal status of the given phy_id first. 24756f00a022SMaxime Bizon */ 24766f00a022SMaxime Bizon static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id, 24776f00a022SMaxime Bizon int location) 24786f00a022SMaxime Bizon { 24796f00a022SMaxime Bizon struct bcm_enet_priv *priv; 24806f00a022SMaxime Bizon 24816f00a022SMaxime Bizon priv = netdev_priv(dev); 24826f00a022SMaxime Bizon return bcmenet_sw_mdio_read(priv, 24836f00a022SMaxime Bizon bcm_enetsw_phy_is_external(priv, phy_id), 24846f00a022SMaxime Bizon phy_id, location); 24856f00a022SMaxime Bizon } 24866f00a022SMaxime Bizon 24876f00a022SMaxime Bizon /* can't use bcmenet_sw_mdio_write directly as we need to sort out 24886f00a022SMaxime Bizon * external/internal status of the given phy_id first. 
24896f00a022SMaxime Bizon */ 24906f00a022SMaxime Bizon static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id, 24916f00a022SMaxime Bizon int location, 24926f00a022SMaxime Bizon int val) 24936f00a022SMaxime Bizon { 24946f00a022SMaxime Bizon struct bcm_enet_priv *priv; 24956f00a022SMaxime Bizon 24966f00a022SMaxime Bizon priv = netdev_priv(dev); 24976f00a022SMaxime Bizon bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id), 24986f00a022SMaxime Bizon phy_id, location, val); 24996f00a022SMaxime Bizon } 25006f00a022SMaxime Bizon 25016f00a022SMaxime Bizon static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 25026f00a022SMaxime Bizon { 25036f00a022SMaxime Bizon struct mii_if_info mii; 25046f00a022SMaxime Bizon 25056f00a022SMaxime Bizon mii.dev = dev; 25066f00a022SMaxime Bizon mii.mdio_read = bcm_enetsw_mii_mdio_read; 25076f00a022SMaxime Bizon mii.mdio_write = bcm_enetsw_mii_mdio_write; 25086f00a022SMaxime Bizon mii.phy_id = 0; 25096f00a022SMaxime Bizon mii.phy_id_mask = 0x3f; 25106f00a022SMaxime Bizon mii.reg_num_mask = 0x1f; 25116f00a022SMaxime Bizon return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); 25126f00a022SMaxime Bizon 25136f00a022SMaxime Bizon } 25146f00a022SMaxime Bizon 25156f00a022SMaxime Bizon static const struct net_device_ops bcm_enetsw_ops = { 25166f00a022SMaxime Bizon .ndo_open = bcm_enetsw_open, 25176f00a022SMaxime Bizon .ndo_stop = bcm_enetsw_stop, 25186f00a022SMaxime Bizon .ndo_start_xmit = bcm_enet_start_xmit, 25196f00a022SMaxime Bizon .ndo_change_mtu = bcm_enet_change_mtu, 25206f00a022SMaxime Bizon .ndo_do_ioctl = bcm_enetsw_ioctl, 25216f00a022SMaxime Bizon }; 25226f00a022SMaxime Bizon 25236f00a022SMaxime Bizon 25246f00a022SMaxime Bizon static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = { 25256f00a022SMaxime Bizon { "rx_packets", DEV_STAT(rx_packets), -1 }, 25266f00a022SMaxime Bizon { "tx_packets", DEV_STAT(tx_packets), -1 }, 25276f00a022SMaxime Bizon { "rx_bytes", 
DEV_STAT(rx_bytes), -1 }, 25286f00a022SMaxime Bizon { "tx_bytes", DEV_STAT(tx_bytes), -1 }, 25296f00a022SMaxime Bizon { "rx_errors", DEV_STAT(rx_errors), -1 }, 25306f00a022SMaxime Bizon { "tx_errors", DEV_STAT(tx_errors), -1 }, 25316f00a022SMaxime Bizon { "rx_dropped", DEV_STAT(rx_dropped), -1 }, 25326f00a022SMaxime Bizon { "tx_dropped", DEV_STAT(tx_dropped), -1 }, 25336f00a022SMaxime Bizon 25346f00a022SMaxime Bizon { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT }, 25356f00a022SMaxime Bizon { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST }, 25366f00a022SMaxime Bizon { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST }, 25376f00a022SMaxime Bizon { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT }, 25386f00a022SMaxime Bizon { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 }, 25396f00a022SMaxime Bizon { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 }, 25406f00a022SMaxime Bizon { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 }, 25416f00a022SMaxime Bizon { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 }, 25426f00a022SMaxime Bizon { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023}, 25436f00a022SMaxime Bizon { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max), 25446f00a022SMaxime Bizon ETHSW_MIB_RX_1024_1522 }, 25456f00a022SMaxime Bizon { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047), 25466f00a022SMaxime Bizon ETHSW_MIB_RX_1523_2047 }, 25476f00a022SMaxime Bizon { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095), 25486f00a022SMaxime Bizon ETHSW_MIB_RX_2048_4095 }, 25496f00a022SMaxime Bizon { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191), 25506f00a022SMaxime Bizon ETHSW_MIB_RX_4096_8191 }, 25516f00a022SMaxime Bizon { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728), 25526f00a022SMaxime Bizon ETHSW_MIB_RX_8192_9728 }, 25536f00a022SMaxime Bizon { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, 25546f00a022SMaxime Bizon { 
"tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, 25556f00a022SMaxime Bizon { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, 25566f00a022SMaxime Bizon { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, 25576f00a022SMaxime Bizon { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE }, 25586f00a022SMaxime Bizon 25596f00a022SMaxime Bizon { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, 25606f00a022SMaxime Bizon { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, 25616f00a022SMaxime Bizon { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, 25626f00a022SMaxime Bizon { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, 25636f00a022SMaxime Bizon { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, 25646f00a022SMaxime Bizon { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, 25656f00a022SMaxime Bizon 25666f00a022SMaxime Bizon }; 25676f00a022SMaxime Bizon 25686f00a022SMaxime Bizon #define BCM_ENETSW_STATS_LEN \ 25696f00a022SMaxime Bizon (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats)) 25706f00a022SMaxime Bizon 25716f00a022SMaxime Bizon static void bcm_enetsw_get_strings(struct net_device *netdev, 25726f00a022SMaxime Bizon u32 stringset, u8 *data) 25736f00a022SMaxime Bizon { 25746f00a022SMaxime Bizon int i; 25756f00a022SMaxime Bizon 25766f00a022SMaxime Bizon switch (stringset) { 25776f00a022SMaxime Bizon case ETH_SS_STATS: 25786f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 25796f00a022SMaxime Bizon memcpy(data + i * ETH_GSTRING_LEN, 25806f00a022SMaxime Bizon bcm_enetsw_gstrings_stats[i].stat_string, 25816f00a022SMaxime Bizon ETH_GSTRING_LEN); 25826f00a022SMaxime Bizon } 25836f00a022SMaxime Bizon break; 25846f00a022SMaxime Bizon } 25856f00a022SMaxime Bizon } 25866f00a022SMaxime Bizon 25876f00a022SMaxime Bizon static int bcm_enetsw_get_sset_count(struct net_device *netdev, 25886f00a022SMaxime Bizon int string_set) 
25896f00a022SMaxime Bizon { 25906f00a022SMaxime Bizon switch (string_set) { 25916f00a022SMaxime Bizon case ETH_SS_STATS: 25926f00a022SMaxime Bizon return BCM_ENETSW_STATS_LEN; 25936f00a022SMaxime Bizon default: 25946f00a022SMaxime Bizon return -EINVAL; 25956f00a022SMaxime Bizon } 25966f00a022SMaxime Bizon } 25976f00a022SMaxime Bizon 25986f00a022SMaxime Bizon static void bcm_enetsw_get_drvinfo(struct net_device *netdev, 25996f00a022SMaxime Bizon struct ethtool_drvinfo *drvinfo) 26006f00a022SMaxime Bizon { 26016f00a022SMaxime Bizon strncpy(drvinfo->driver, bcm_enet_driver_name, 32); 26026f00a022SMaxime Bizon strncpy(drvinfo->version, bcm_enet_driver_version, 32); 26036f00a022SMaxime Bizon strncpy(drvinfo->fw_version, "N/A", 32); 26046f00a022SMaxime Bizon strncpy(drvinfo->bus_info, "bcm63xx", 32); 26056f00a022SMaxime Bizon drvinfo->n_stats = BCM_ENETSW_STATS_LEN; 26066f00a022SMaxime Bizon } 26076f00a022SMaxime Bizon 26086f00a022SMaxime Bizon static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, 26096f00a022SMaxime Bizon struct ethtool_stats *stats, 26106f00a022SMaxime Bizon u64 *data) 26116f00a022SMaxime Bizon { 26126f00a022SMaxime Bizon struct bcm_enet_priv *priv; 26136f00a022SMaxime Bizon int i; 26146f00a022SMaxime Bizon 26156f00a022SMaxime Bizon priv = netdev_priv(netdev); 26166f00a022SMaxime Bizon 26176f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 26186f00a022SMaxime Bizon const struct bcm_enet_stats *s; 26196f00a022SMaxime Bizon u32 lo, hi; 26206f00a022SMaxime Bizon char *p; 26216f00a022SMaxime Bizon int reg; 26226f00a022SMaxime Bizon 26236f00a022SMaxime Bizon s = &bcm_enetsw_gstrings_stats[i]; 26246f00a022SMaxime Bizon 26256f00a022SMaxime Bizon reg = s->mib_reg; 26266f00a022SMaxime Bizon if (reg == -1) 26276f00a022SMaxime Bizon continue; 26286f00a022SMaxime Bizon 26296f00a022SMaxime Bizon lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); 26306f00a022SMaxime Bizon p = (char *)priv + s->stat_offset; 26316f00a022SMaxime Bizon 
26326f00a022SMaxime Bizon if (s->sizeof_stat == sizeof(u64)) { 26336f00a022SMaxime Bizon hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); 26346f00a022SMaxime Bizon *(u64 *)p = ((u64)hi << 32 | lo); 26356f00a022SMaxime Bizon } else { 26366f00a022SMaxime Bizon *(u32 *)p = lo; 26376f00a022SMaxime Bizon } 26386f00a022SMaxime Bizon } 26396f00a022SMaxime Bizon 26406f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 26416f00a022SMaxime Bizon const struct bcm_enet_stats *s; 26426f00a022SMaxime Bizon char *p; 26436f00a022SMaxime Bizon 26446f00a022SMaxime Bizon s = &bcm_enetsw_gstrings_stats[i]; 26456f00a022SMaxime Bizon 26466f00a022SMaxime Bizon if (s->mib_reg == -1) 26476f00a022SMaxime Bizon p = (char *)&netdev->stats + s->stat_offset; 26486f00a022SMaxime Bizon else 26496f00a022SMaxime Bizon p = (char *)priv + s->stat_offset; 26506f00a022SMaxime Bizon 26516f00a022SMaxime Bizon data[i] = (s->sizeof_stat == sizeof(u64)) ? 26526f00a022SMaxime Bizon *(u64 *)p : *(u32 *)p; 26536f00a022SMaxime Bizon } 26546f00a022SMaxime Bizon } 26556f00a022SMaxime Bizon 26566f00a022SMaxime Bizon static void bcm_enetsw_get_ringparam(struct net_device *dev, 26576f00a022SMaxime Bizon struct ethtool_ringparam *ering) 26586f00a022SMaxime Bizon { 26596f00a022SMaxime Bizon struct bcm_enet_priv *priv; 26606f00a022SMaxime Bizon 26616f00a022SMaxime Bizon priv = netdev_priv(dev); 26626f00a022SMaxime Bizon 26636f00a022SMaxime Bizon /* rx/tx ring is actually only limited by memory */ 26646f00a022SMaxime Bizon ering->rx_max_pending = 8192; 26656f00a022SMaxime Bizon ering->tx_max_pending = 8192; 26666f00a022SMaxime Bizon ering->rx_mini_max_pending = 0; 26676f00a022SMaxime Bizon ering->rx_jumbo_max_pending = 0; 26686f00a022SMaxime Bizon ering->rx_pending = priv->rx_ring_size; 26696f00a022SMaxime Bizon ering->tx_pending = priv->tx_ring_size; 26706f00a022SMaxime Bizon } 26716f00a022SMaxime Bizon 26726f00a022SMaxime Bizon static int bcm_enetsw_set_ringparam(struct net_device *dev, 
26736f00a022SMaxime Bizon struct ethtool_ringparam *ering) 26746f00a022SMaxime Bizon { 26756f00a022SMaxime Bizon struct bcm_enet_priv *priv; 26766f00a022SMaxime Bizon int was_running; 26776f00a022SMaxime Bizon 26786f00a022SMaxime Bizon priv = netdev_priv(dev); 26796f00a022SMaxime Bizon 26806f00a022SMaxime Bizon was_running = 0; 26816f00a022SMaxime Bizon if (netif_running(dev)) { 26826f00a022SMaxime Bizon bcm_enetsw_stop(dev); 26836f00a022SMaxime Bizon was_running = 1; 26846f00a022SMaxime Bizon } 26856f00a022SMaxime Bizon 26866f00a022SMaxime Bizon priv->rx_ring_size = ering->rx_pending; 26876f00a022SMaxime Bizon priv->tx_ring_size = ering->tx_pending; 26886f00a022SMaxime Bizon 26896f00a022SMaxime Bizon if (was_running) { 26906f00a022SMaxime Bizon int err; 26916f00a022SMaxime Bizon 26926f00a022SMaxime Bizon err = bcm_enetsw_open(dev); 26936f00a022SMaxime Bizon if (err) 26946f00a022SMaxime Bizon dev_close(dev); 26956f00a022SMaxime Bizon } 26966f00a022SMaxime Bizon return 0; 26976f00a022SMaxime Bizon } 26986f00a022SMaxime Bizon 26996f00a022SMaxime Bizon static struct ethtool_ops bcm_enetsw_ethtool_ops = { 27006f00a022SMaxime Bizon .get_strings = bcm_enetsw_get_strings, 27016f00a022SMaxime Bizon .get_sset_count = bcm_enetsw_get_sset_count, 27026f00a022SMaxime Bizon .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, 27036f00a022SMaxime Bizon .get_drvinfo = bcm_enetsw_get_drvinfo, 27046f00a022SMaxime Bizon .get_ringparam = bcm_enetsw_get_ringparam, 27056f00a022SMaxime Bizon .set_ringparam = bcm_enetsw_set_ringparam, 27066f00a022SMaxime Bizon }; 27076f00a022SMaxime Bizon 27086f00a022SMaxime Bizon /* allocate netdevice, request register memory and register device. 
*/ 27096f00a022SMaxime Bizon static int bcm_enetsw_probe(struct platform_device *pdev) 27106f00a022SMaxime Bizon { 27116f00a022SMaxime Bizon struct bcm_enet_priv *priv; 27126f00a022SMaxime Bizon struct net_device *dev; 27136f00a022SMaxime Bizon struct bcm63xx_enetsw_platform_data *pd; 27146f00a022SMaxime Bizon struct resource *res_mem; 27156f00a022SMaxime Bizon int ret, irq_rx, irq_tx; 27166f00a022SMaxime Bizon 27176f00a022SMaxime Bizon /* stop if shared driver failed, assume driver->probe will be 27186f00a022SMaxime Bizon * called in the same order we register devices (correct ?) 27196f00a022SMaxime Bizon */ 27206f00a022SMaxime Bizon if (!bcm_enet_shared_base[0]) 27216f00a022SMaxime Bizon return -ENODEV; 27226f00a022SMaxime Bizon 27236f00a022SMaxime Bizon res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 27246f00a022SMaxime Bizon irq_rx = platform_get_irq(pdev, 0); 27256f00a022SMaxime Bizon irq_tx = platform_get_irq(pdev, 1); 27266f00a022SMaxime Bizon if (!res_mem || irq_rx < 0) 27276f00a022SMaxime Bizon return -ENODEV; 27286f00a022SMaxime Bizon 27296f00a022SMaxime Bizon ret = 0; 27306f00a022SMaxime Bizon dev = alloc_etherdev(sizeof(*priv)); 27316f00a022SMaxime Bizon if (!dev) 27326f00a022SMaxime Bizon return -ENOMEM; 27336f00a022SMaxime Bizon priv = netdev_priv(dev); 27346f00a022SMaxime Bizon memset(priv, 0, sizeof(*priv)); 27356f00a022SMaxime Bizon 27366f00a022SMaxime Bizon /* initialize default and fetch platform data */ 27376f00a022SMaxime Bizon priv->enet_is_sw = true; 27386f00a022SMaxime Bizon priv->irq_rx = irq_rx; 27396f00a022SMaxime Bizon priv->irq_tx = irq_tx; 27406f00a022SMaxime Bizon priv->rx_ring_size = BCMENET_DEF_RX_DESC; 27416f00a022SMaxime Bizon priv->tx_ring_size = BCMENET_DEF_TX_DESC; 27426f00a022SMaxime Bizon priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; 27436f00a022SMaxime Bizon 2744cf0e7794SJingoo Han pd = dev_get_platdata(&pdev->dev); 27456f00a022SMaxime Bizon if (pd) { 27466f00a022SMaxime Bizon memcpy(dev->dev_addr, pd->mac_addr, 
ETH_ALEN); 27476f00a022SMaxime Bizon memcpy(priv->used_ports, pd->used_ports, 27486f00a022SMaxime Bizon sizeof(pd->used_ports)); 27496f00a022SMaxime Bizon priv->num_ports = pd->num_ports; 27503dc6475cSFlorian Fainelli priv->dma_has_sram = pd->dma_has_sram; 27513dc6475cSFlorian Fainelli priv->dma_chan_en_mask = pd->dma_chan_en_mask; 27523dc6475cSFlorian Fainelli priv->dma_chan_int_mask = pd->dma_chan_int_mask; 27533dc6475cSFlorian Fainelli priv->dma_chan_width = pd->dma_chan_width; 27546f00a022SMaxime Bizon } 27556f00a022SMaxime Bizon 27566f00a022SMaxime Bizon ret = compute_hw_mtu(priv, dev->mtu); 27576f00a022SMaxime Bizon if (ret) 27586f00a022SMaxime Bizon goto out; 27596f00a022SMaxime Bizon 27606f00a022SMaxime Bizon if (!request_mem_region(res_mem->start, resource_size(res_mem), 27616f00a022SMaxime Bizon "bcm63xx_enetsw")) { 27626f00a022SMaxime Bizon ret = -EBUSY; 27636f00a022SMaxime Bizon goto out; 27646f00a022SMaxime Bizon } 27656f00a022SMaxime Bizon 27666f00a022SMaxime Bizon priv->base = ioremap(res_mem->start, resource_size(res_mem)); 27676f00a022SMaxime Bizon if (priv->base == NULL) { 27686f00a022SMaxime Bizon ret = -ENOMEM; 27696f00a022SMaxime Bizon goto out_release_mem; 27706f00a022SMaxime Bizon } 27716f00a022SMaxime Bizon 27726f00a022SMaxime Bizon priv->mac_clk = clk_get(&pdev->dev, "enetsw"); 27736f00a022SMaxime Bizon if (IS_ERR(priv->mac_clk)) { 27746f00a022SMaxime Bizon ret = PTR_ERR(priv->mac_clk); 27756f00a022SMaxime Bizon goto out_unmap; 27766f00a022SMaxime Bizon } 27776f00a022SMaxime Bizon clk_enable(priv->mac_clk); 27786f00a022SMaxime Bizon 27796f00a022SMaxime Bizon priv->rx_chan = 0; 27806f00a022SMaxime Bizon priv->tx_chan = 1; 27816f00a022SMaxime Bizon spin_lock_init(&priv->rx_lock); 27826f00a022SMaxime Bizon 27836f00a022SMaxime Bizon /* init rx timeout (used for oom) */ 27846f00a022SMaxime Bizon init_timer(&priv->rx_timeout); 27856f00a022SMaxime Bizon priv->rx_timeout.function = bcm_enet_refill_rx_timer; 27866f00a022SMaxime Bizon 
priv->rx_timeout.data = (unsigned long)dev; 27876f00a022SMaxime Bizon 27886f00a022SMaxime Bizon /* register netdevice */ 27896f00a022SMaxime Bizon dev->netdev_ops = &bcm_enetsw_ops; 27906f00a022SMaxime Bizon netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 27917ad24ea4SWilfried Klaebe dev->ethtool_ops = &bcm_enetsw_ethtool_ops; 27926f00a022SMaxime Bizon SET_NETDEV_DEV(dev, &pdev->dev); 27936f00a022SMaxime Bizon 27946f00a022SMaxime Bizon spin_lock_init(&priv->enetsw_mdio_lock); 27956f00a022SMaxime Bizon 27966f00a022SMaxime Bizon ret = register_netdev(dev); 27976f00a022SMaxime Bizon if (ret) 27986f00a022SMaxime Bizon goto out_put_clk; 27996f00a022SMaxime Bizon 28006f00a022SMaxime Bizon netif_carrier_off(dev); 28016f00a022SMaxime Bizon platform_set_drvdata(pdev, dev); 28026f00a022SMaxime Bizon priv->pdev = pdev; 28036f00a022SMaxime Bizon priv->net_dev = dev; 28046f00a022SMaxime Bizon 28056f00a022SMaxime Bizon return 0; 28066f00a022SMaxime Bizon 28076f00a022SMaxime Bizon out_put_clk: 28086f00a022SMaxime Bizon clk_put(priv->mac_clk); 28096f00a022SMaxime Bizon 28106f00a022SMaxime Bizon out_unmap: 28116f00a022SMaxime Bizon iounmap(priv->base); 28126f00a022SMaxime Bizon 28136f00a022SMaxime Bizon out_release_mem: 28146f00a022SMaxime Bizon release_mem_region(res_mem->start, resource_size(res_mem)); 28156f00a022SMaxime Bizon out: 28166f00a022SMaxime Bizon free_netdev(dev); 28176f00a022SMaxime Bizon return ret; 28186f00a022SMaxime Bizon } 28196f00a022SMaxime Bizon 28206f00a022SMaxime Bizon 28216f00a022SMaxime Bizon /* exit func, stops hardware and unregisters netdevice */ 28226f00a022SMaxime Bizon static int bcm_enetsw_remove(struct platform_device *pdev) 28236f00a022SMaxime Bizon { 28246f00a022SMaxime Bizon struct bcm_enet_priv *priv; 28256f00a022SMaxime Bizon struct net_device *dev; 28266f00a022SMaxime Bizon struct resource *res; 28276f00a022SMaxime Bizon 28286f00a022SMaxime Bizon /* stop netdevice */ 28296f00a022SMaxime Bizon dev = platform_get_drvdata(pdev); 
28306f00a022SMaxime Bizon priv = netdev_priv(dev); 28316f00a022SMaxime Bizon unregister_netdev(dev); 28326f00a022SMaxime Bizon 28336f00a022SMaxime Bizon /* release device resources */ 28346f00a022SMaxime Bizon iounmap(priv->base); 28356f00a022SMaxime Bizon res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 28366f00a022SMaxime Bizon release_mem_region(res->start, resource_size(res)); 28376f00a022SMaxime Bizon 28386f00a022SMaxime Bizon free_netdev(dev); 28396f00a022SMaxime Bizon return 0; 28406f00a022SMaxime Bizon } 28416f00a022SMaxime Bizon 28426f00a022SMaxime Bizon struct platform_driver bcm63xx_enetsw_driver = { 28436f00a022SMaxime Bizon .probe = bcm_enetsw_probe, 28446f00a022SMaxime Bizon .remove = bcm_enetsw_remove, 28456f00a022SMaxime Bizon .driver = { 28466f00a022SMaxime Bizon .name = "bcm63xx_enetsw", 28476f00a022SMaxime Bizon .owner = THIS_MODULE, 28486f00a022SMaxime Bizon }, 28496f00a022SMaxime Bizon }; 28506f00a022SMaxime Bizon 28516f00a022SMaxime Bizon /* reserve & remap memory space shared between all macs */ 2852047fc566SBill Pemberton static int bcm_enet_shared_probe(struct platform_device *pdev) 2853adfc5217SJeff Kirsher { 2854adfc5217SJeff Kirsher struct resource *res; 28550ae99b5fSMaxime Bizon void __iomem *p[3]; 28560ae99b5fSMaxime Bizon unsigned int i; 2857adfc5217SJeff Kirsher 28580ae99b5fSMaxime Bizon memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); 2859adfc5217SJeff Kirsher 28600ae99b5fSMaxime Bizon for (i = 0; i < 3; i++) { 28610ae99b5fSMaxime Bizon res = platform_get_resource(pdev, IORESOURCE_MEM, i); 28620ae99b5fSMaxime Bizon p[i] = devm_ioremap_resource(&pdev->dev, res); 2863646093a2SWei Yongjun if (IS_ERR(p[i])) 2864646093a2SWei Yongjun return PTR_ERR(p[i]); 28650ae99b5fSMaxime Bizon } 28660ae99b5fSMaxime Bizon 28670ae99b5fSMaxime Bizon memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); 28681c03da05SJonas Gorski 2869adfc5217SJeff Kirsher return 0; 2870adfc5217SJeff Kirsher } 2871adfc5217SJeff Kirsher 
2872047fc566SBill Pemberton static int bcm_enet_shared_remove(struct platform_device *pdev) 2873adfc5217SJeff Kirsher { 2874adfc5217SJeff Kirsher return 0; 2875adfc5217SJeff Kirsher } 2876adfc5217SJeff Kirsher 28776f00a022SMaxime Bizon /* this "shared" driver is needed because both macs share a single 2878adfc5217SJeff Kirsher * address space 2879adfc5217SJeff Kirsher */ 2880adfc5217SJeff Kirsher struct platform_driver bcm63xx_enet_shared_driver = { 2881adfc5217SJeff Kirsher .probe = bcm_enet_shared_probe, 2882047fc566SBill Pemberton .remove = bcm_enet_shared_remove, 2883adfc5217SJeff Kirsher .driver = { 2884adfc5217SJeff Kirsher .name = "bcm63xx_enet_shared", 2885adfc5217SJeff Kirsher .owner = THIS_MODULE, 2886adfc5217SJeff Kirsher }, 2887adfc5217SJeff Kirsher }; 2888adfc5217SJeff Kirsher 28896f00a022SMaxime Bizon /* entry point */ 2890adfc5217SJeff Kirsher static int __init bcm_enet_init(void) 2891adfc5217SJeff Kirsher { 2892adfc5217SJeff Kirsher int ret; 2893adfc5217SJeff Kirsher 2894adfc5217SJeff Kirsher ret = platform_driver_register(&bcm63xx_enet_shared_driver); 2895adfc5217SJeff Kirsher if (ret) 2896adfc5217SJeff Kirsher return ret; 2897adfc5217SJeff Kirsher 2898adfc5217SJeff Kirsher ret = platform_driver_register(&bcm63xx_enet_driver); 2899adfc5217SJeff Kirsher if (ret) 2900adfc5217SJeff Kirsher platform_driver_unregister(&bcm63xx_enet_shared_driver); 2901adfc5217SJeff Kirsher 29026f00a022SMaxime Bizon ret = platform_driver_register(&bcm63xx_enetsw_driver); 29036f00a022SMaxime Bizon if (ret) { 29046f00a022SMaxime Bizon platform_driver_unregister(&bcm63xx_enet_driver); 29056f00a022SMaxime Bizon platform_driver_unregister(&bcm63xx_enet_shared_driver); 29066f00a022SMaxime Bizon } 29076f00a022SMaxime Bizon 2908adfc5217SJeff Kirsher return ret; 2909adfc5217SJeff Kirsher } 2910adfc5217SJeff Kirsher 2911adfc5217SJeff Kirsher static void __exit bcm_enet_exit(void) 2912adfc5217SJeff Kirsher { 2913adfc5217SJeff Kirsher 
platform_driver_unregister(&bcm63xx_enet_driver); 29146f00a022SMaxime Bizon platform_driver_unregister(&bcm63xx_enetsw_driver); 2915adfc5217SJeff Kirsher platform_driver_unregister(&bcm63xx_enet_shared_driver); 2916adfc5217SJeff Kirsher } 2917adfc5217SJeff Kirsher 2918adfc5217SJeff Kirsher 2919adfc5217SJeff Kirsher module_init(bcm_enet_init); 2920adfc5217SJeff Kirsher module_exit(bcm_enet_exit); 2921adfc5217SJeff Kirsher 2922adfc5217SJeff Kirsher MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); 2923adfc5217SJeff Kirsher MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); 2924adfc5217SJeff Kirsher MODULE_LICENSE("GPL"); 2925