// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

/* rx packets shorter than this are copied into a freshly allocated
 * skb instead of detaching the dma buffer (see bcm_enet_receive_queue) */
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
				 u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
				 u16 val, u32 off)
{
	bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
				 u8 val, u32 off)
{
	bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[0] + off);
}

/* per-channel dma registers: 'chan' selects the register bank inside the
 * shared region, banks are priv->dma_chan_width bytes apart */
static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[1] +
			 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[1] +
		   bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 *
 * Returns 0 on success, 1 on timeout.
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 *
 * Returns the 16-bit register value, or -1 if the mdio transfer
 * timed out.
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 *
 * A transfer timeout is silently ignored; always returns 0.
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}
/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 *
 * Allocates page-fragment buffers for every free descriptor and hands
 * them back to the dma engine.  @napi_mode selects the allocation
 * helper (napi_alloc_frag vs netdev_alloc_frag); callers must hold
 * priv->rx_lock or otherwise serialize against the rx path.
 */
static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_buf[desc_idx]) {
			void *buf;

			if (likely(napi_mode))
				buf = napi_alloc_frag(priv->rx_frag_size);
			else
				buf = netdev_alloc_frag(priv->rx_frag_size);
			if (unlikely(!buf))
				break;
			priv->rx_buf[desc_idx] = buf;
			/* NOTE(review): mapping result is not checked with
			 * dma_mapping_error() — confirm it cannot fail here */
			desc->address = dma_map_single(&priv->pdev->dev,
						       buf + priv->rx_buf_offset,
						       priv->rx_buf_size,
						       DMA_FROM_DEVICE);
		}

		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		/* buffer address must be visible before ownership is
		 * handed to the dma engine */
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev, false);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 *
 * Harvests up to @budget completed descriptors, builds skbs (copying
 * small packets below 'copybreak', detaching the frag otherwise),
 * delivers them in one batch and refills/kicks the rx dma.
 * Returns the number of descriptors processed.
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct list_head rx_list;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	INIT_LIST_HEAD(&rx_list);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptor */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;
		void *buf;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
		    (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			/* small packet: copy out, keep the dma buffer
			 * mapped and armed on the descriptor */
			skb = napi_alloc_skb(&priv->napi, len);
			if (unlikely(!skb)) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(skb->data, buf + priv->rx_buf_offset, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
		} else {
			/* large packet: detach the frag and wrap it in
			 * an skb; refill will allocate a replacement */
			dma_unmap_single(kdev, desc->address,
					 priv->rx_buf_size, DMA_FROM_DEVICE);
			priv->rx_buf[desc_idx] = NULL;

			skb = napi_build_skb(buf, priv->rx_frag_size);
			if (unlikely(!skb)) {
				skb_free_frag(buf);
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, priv->rx_buf_offset);
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		list_add_tail(&skb->list, &rx_list);

	} while (processed < budget);

	netif_receive_skb_list(&rx_list);
	priv->rx_desc_count -= processed;

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev, true);

		/* kick rx dma */
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->rx_chan);
	}

	return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 *
 * With @force set, descriptors are reclaimed even if the dma engine
 * still owns them (used on teardown).  Returns the number of
 * descriptors released.
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	unsigned int bytes;
	int released;

	priv = netdev_priv(dev);
	bytes = 0;
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other field of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		bytes += skb->len;
		/* in napi context (!force) the skb goes to the napi
		 * bulk-free cache */
		napi_consume_skb(skb, !force);
		released++;
	}

	/* byte-queue-limits accounting */
	netdev_completed_queue(dev, released, bytes);

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* reclaim sent skb */
	bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	return rx_work_done;
}
/*
 * mac interrupt handler
 *
 * Only the MIB-counter interrupt is handled here; it is cleared and
 * masked, and the (sleepable) register readout is deferred to a
 * workqueue.
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 *
 * Masks both dma channel interrupts and defers all work to napi;
 * bcm_enet_poll() re-enables them when the rings are drained.
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}
/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets sent on a switch device */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}
		data = skb_put_zero(skb, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	/* NOTE(review): mapping result is not checked with
	 * dma_mapping_error() — confirm it cannot fail here */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* byte-queue-limits accounting */
	netdev_sent_queue(dev, skb->len);

	/* kick tx dma */
	if (!netdev_xmit_more() || !priv->tx_desc_count)
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	eth_hw_addr_set(dev, addr->sa_data);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	/* invalidate the remaining perfect match registers */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	if (!priv->dma_has_sram)
		return;

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	/* NOTE(review): this function is truncated at the end of the
	 * visible chunk; the remainder of its body is outside this view */
Kirsher if (tx_en) 782adfc5217SJeff Kirsher val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); 783adfc5217SJeff Kirsher else 784adfc5217SJeff Kirsher val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); 785adfc5217SJeff Kirsher enet_dma_writel(priv, val, ENETDMA_CFG_REG); 786adfc5217SJeff Kirsher } 787adfc5217SJeff Kirsher 788adfc5217SJeff Kirsher /* 789adfc5217SJeff Kirsher * link changed callback (from phylib) 790adfc5217SJeff Kirsher */ 791adfc5217SJeff Kirsher static void bcm_enet_adjust_phy_link(struct net_device *dev) 792adfc5217SJeff Kirsher { 793adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 794adfc5217SJeff Kirsher struct phy_device *phydev; 795adfc5217SJeff Kirsher int status_changed; 796adfc5217SJeff Kirsher 797adfc5217SJeff Kirsher priv = netdev_priv(dev); 798625eb866SPhilippe Reynes phydev = dev->phydev; 799adfc5217SJeff Kirsher status_changed = 0; 800adfc5217SJeff Kirsher 801adfc5217SJeff Kirsher if (priv->old_link != phydev->link) { 802adfc5217SJeff Kirsher status_changed = 1; 803adfc5217SJeff Kirsher priv->old_link = phydev->link; 804adfc5217SJeff Kirsher } 805adfc5217SJeff Kirsher 806adfc5217SJeff Kirsher /* reflect duplex change in mac configuration */ 807adfc5217SJeff Kirsher if (phydev->link && phydev->duplex != priv->old_duplex) { 808adfc5217SJeff Kirsher bcm_enet_set_duplex(priv, 809adfc5217SJeff Kirsher (phydev->duplex == DUPLEX_FULL) ? 
1 : 0); 810adfc5217SJeff Kirsher status_changed = 1; 811adfc5217SJeff Kirsher priv->old_duplex = phydev->duplex; 812adfc5217SJeff Kirsher } 813adfc5217SJeff Kirsher 814adfc5217SJeff Kirsher /* enable flow control if remote advertise it (trust phylib to 815adfc5217SJeff Kirsher * check that duplex is full */ 816adfc5217SJeff Kirsher if (phydev->link && phydev->pause != priv->old_pause) { 817adfc5217SJeff Kirsher int rx_pause_en, tx_pause_en; 818adfc5217SJeff Kirsher 819adfc5217SJeff Kirsher if (phydev->pause) { 820adfc5217SJeff Kirsher /* pause was advertised by lpa and us */ 821adfc5217SJeff Kirsher rx_pause_en = 1; 822adfc5217SJeff Kirsher tx_pause_en = 1; 823adfc5217SJeff Kirsher } else if (!priv->pause_auto) { 82403671057SMasahiro Yamada /* pause setting overridden by user */ 825adfc5217SJeff Kirsher rx_pause_en = priv->pause_rx; 826adfc5217SJeff Kirsher tx_pause_en = priv->pause_tx; 827adfc5217SJeff Kirsher } else { 828adfc5217SJeff Kirsher rx_pause_en = 0; 829adfc5217SJeff Kirsher tx_pause_en = 0; 830adfc5217SJeff Kirsher } 831adfc5217SJeff Kirsher 832adfc5217SJeff Kirsher bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en); 833adfc5217SJeff Kirsher status_changed = 1; 834adfc5217SJeff Kirsher priv->old_pause = phydev->pause; 835adfc5217SJeff Kirsher } 836adfc5217SJeff Kirsher 837adfc5217SJeff Kirsher if (status_changed) { 838adfc5217SJeff Kirsher pr_info("%s: link %s", dev->name, phydev->link ? 839adfc5217SJeff Kirsher "UP" : "DOWN"); 840adfc5217SJeff Kirsher if (phydev->link) 841adfc5217SJeff Kirsher pr_cont(" - %d/%s - flow control %s", phydev->speed, 842adfc5217SJeff Kirsher DUPLEX_FULL == phydev->duplex ? "full" : "half", 843adfc5217SJeff Kirsher phydev->pause == 1 ? 
"rx&tx" : "off"); 844adfc5217SJeff Kirsher 845adfc5217SJeff Kirsher pr_cont("\n"); 846adfc5217SJeff Kirsher } 847adfc5217SJeff Kirsher } 848adfc5217SJeff Kirsher 849adfc5217SJeff Kirsher /* 850adfc5217SJeff Kirsher * link changed callback (if phylib is not used) 851adfc5217SJeff Kirsher */ 852adfc5217SJeff Kirsher static void bcm_enet_adjust_link(struct net_device *dev) 853adfc5217SJeff Kirsher { 854adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 855adfc5217SJeff Kirsher 856adfc5217SJeff Kirsher priv = netdev_priv(dev); 857adfc5217SJeff Kirsher bcm_enet_set_duplex(priv, priv->force_duplex_full); 858adfc5217SJeff Kirsher bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); 859adfc5217SJeff Kirsher netif_carrier_on(dev); 860adfc5217SJeff Kirsher 861adfc5217SJeff Kirsher pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", 862adfc5217SJeff Kirsher dev->name, 863adfc5217SJeff Kirsher priv->force_speed_100 ? 100 : 10, 864adfc5217SJeff Kirsher priv->force_duplex_full ? "full" : "half", 865adfc5217SJeff Kirsher priv->pause_rx ? "rx" : "off", 866adfc5217SJeff Kirsher priv->pause_tx ? 
"tx" : "off"); 867adfc5217SJeff Kirsher } 868adfc5217SJeff Kirsher 869d27de0efSSieng Piaw Liew static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv) 8703d0b7265SSieng Piaw Liew { 8713d0b7265SSieng Piaw Liew int i; 8723d0b7265SSieng Piaw Liew 8733d0b7265SSieng Piaw Liew for (i = 0; i < priv->rx_ring_size; i++) { 8743d0b7265SSieng Piaw Liew struct bcm_enet_desc *desc; 8753d0b7265SSieng Piaw Liew 876d27de0efSSieng Piaw Liew if (!priv->rx_buf[i]) 8773d0b7265SSieng Piaw Liew continue; 8783d0b7265SSieng Piaw Liew 8793d0b7265SSieng Piaw Liew desc = &priv->rx_desc_cpu[i]; 880d27de0efSSieng Piaw Liew dma_unmap_single(kdev, desc->address, priv->rx_buf_size, 8813d0b7265SSieng Piaw Liew DMA_FROM_DEVICE); 882d27de0efSSieng Piaw Liew skb_free_frag(priv->rx_buf[i]); 8833d0b7265SSieng Piaw Liew } 884d27de0efSSieng Piaw Liew kfree(priv->rx_buf); 8853d0b7265SSieng Piaw Liew } 8863d0b7265SSieng Piaw Liew 887adfc5217SJeff Kirsher /* 888adfc5217SJeff Kirsher * open callback, allocate dma rings & buffers and start rx operation 889adfc5217SJeff Kirsher */ 890adfc5217SJeff Kirsher static int bcm_enet_open(struct net_device *dev) 891adfc5217SJeff Kirsher { 892adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 893adfc5217SJeff Kirsher struct sockaddr addr; 894adfc5217SJeff Kirsher struct device *kdev; 895adfc5217SJeff Kirsher struct phy_device *phydev; 896adfc5217SJeff Kirsher int i, ret; 897adfc5217SJeff Kirsher unsigned int size; 898adfc5217SJeff Kirsher char phy_id[MII_BUS_ID_SIZE + 3]; 899adfc5217SJeff Kirsher void *p; 900adfc5217SJeff Kirsher u32 val; 901adfc5217SJeff Kirsher 902adfc5217SJeff Kirsher priv = netdev_priv(dev); 903adfc5217SJeff Kirsher kdev = &priv->pdev->dev; 904adfc5217SJeff Kirsher 905adfc5217SJeff Kirsher if (priv->has_phy) { 906adfc5217SJeff Kirsher /* connect to PHY */ 907adfc5217SJeff Kirsher snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 908c56e9e2aSFlorian Fainelli priv->mii_bus->id, priv->phy_id); 909adfc5217SJeff Kirsher 
910f9a8f83bSFlorian Fainelli phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 911adfc5217SJeff Kirsher PHY_INTERFACE_MODE_MII); 912adfc5217SJeff Kirsher 913adfc5217SJeff Kirsher if (IS_ERR(phydev)) { 914adfc5217SJeff Kirsher dev_err(kdev, "could not attach to PHY\n"); 915adfc5217SJeff Kirsher return PTR_ERR(phydev); 916adfc5217SJeff Kirsher } 917adfc5217SJeff Kirsher 918adfc5217SJeff Kirsher /* mask with MAC supported features */ 919c306ad36SAndrew Lunn phy_support_sym_pause(phydev); 92058056c1eSAndrew Lunn phy_set_max_speed(phydev, SPEED_100); 9210c122405SAndrew Lunn phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx, 9220c122405SAndrew Lunn priv->pause_auto); 923adfc5217SJeff Kirsher 9242220943aSAndrew Lunn phy_attached_info(phydev); 925adfc5217SJeff Kirsher 926adfc5217SJeff Kirsher priv->old_link = 0; 927adfc5217SJeff Kirsher priv->old_duplex = -1; 928adfc5217SJeff Kirsher priv->old_pause = -1; 929df384d43SArnd Bergmann } else { 930df384d43SArnd Bergmann phydev = NULL; 931adfc5217SJeff Kirsher } 932adfc5217SJeff Kirsher 933adfc5217SJeff Kirsher /* mask all interrupts and request them */ 934adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_IRMASK_REG); 9353dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 9363dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 937adfc5217SJeff Kirsher 938adfc5217SJeff Kirsher ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); 939adfc5217SJeff Kirsher if (ret) 940adfc5217SJeff Kirsher goto out_phy_disconnect; 941adfc5217SJeff Kirsher 942df9f1b9fSMichael Opdenacker ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0, 943adfc5217SJeff Kirsher dev->name, dev); 944adfc5217SJeff Kirsher if (ret) 945adfc5217SJeff Kirsher goto out_freeirq; 946adfc5217SJeff Kirsher 947adfc5217SJeff Kirsher ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 948df9f1b9fSMichael Opdenacker 0, dev->name, dev); 949adfc5217SJeff Kirsher if (ret) 950adfc5217SJeff 
Kirsher goto out_freeirq_rx; 951adfc5217SJeff Kirsher 952adfc5217SJeff Kirsher /* initialize perfect match registers */ 953adfc5217SJeff Kirsher for (i = 0; i < 4; i++) { 954adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_PML_REG(i)); 955adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_PMH_REG(i)); 956adfc5217SJeff Kirsher } 957adfc5217SJeff Kirsher 958adfc5217SJeff Kirsher /* write device mac address */ 959adfc5217SJeff Kirsher memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); 960adfc5217SJeff Kirsher bcm_enet_set_mac_address(dev, &addr); 961adfc5217SJeff Kirsher 962adfc5217SJeff Kirsher /* allocate rx dma ring */ 963adfc5217SJeff Kirsher size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 964750afb08SLuis Chamberlain p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 965adfc5217SJeff Kirsher if (!p) { 966adfc5217SJeff Kirsher ret = -ENOMEM; 967adfc5217SJeff Kirsher goto out_freeirq_tx; 968adfc5217SJeff Kirsher } 969adfc5217SJeff Kirsher 970adfc5217SJeff Kirsher priv->rx_desc_alloc_size = size; 971adfc5217SJeff Kirsher priv->rx_desc_cpu = p; 972adfc5217SJeff Kirsher 973adfc5217SJeff Kirsher /* allocate tx dma ring */ 974adfc5217SJeff Kirsher size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 975750afb08SLuis Chamberlain p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 976adfc5217SJeff Kirsher if (!p) { 977adfc5217SJeff Kirsher ret = -ENOMEM; 978adfc5217SJeff Kirsher goto out_free_rx_ring; 979adfc5217SJeff Kirsher } 980adfc5217SJeff Kirsher 981adfc5217SJeff Kirsher priv->tx_desc_alloc_size = size; 982adfc5217SJeff Kirsher priv->tx_desc_cpu = p; 983adfc5217SJeff Kirsher 984b2adaca9SJoe Perches priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), 985adfc5217SJeff Kirsher GFP_KERNEL); 986adfc5217SJeff Kirsher if (!priv->tx_skb) { 987adfc5217SJeff Kirsher ret = -ENOMEM; 988adfc5217SJeff Kirsher goto out_free_tx_ring; 989adfc5217SJeff Kirsher } 990adfc5217SJeff Kirsher 991adfc5217SJeff Kirsher 
priv->tx_desc_count = priv->tx_ring_size; 992adfc5217SJeff Kirsher priv->tx_dirty_desc = 0; 993adfc5217SJeff Kirsher priv->tx_curr_desc = 0; 994adfc5217SJeff Kirsher spin_lock_init(&priv->tx_lock); 995adfc5217SJeff Kirsher 996d27de0efSSieng Piaw Liew /* init & fill rx ring with buffers */ 997d27de0efSSieng Piaw Liew priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *), 998adfc5217SJeff Kirsher GFP_KERNEL); 999d27de0efSSieng Piaw Liew if (!priv->rx_buf) { 1000adfc5217SJeff Kirsher ret = -ENOMEM; 1001adfc5217SJeff Kirsher goto out_free_tx_skb; 1002adfc5217SJeff Kirsher } 1003adfc5217SJeff Kirsher 1004adfc5217SJeff Kirsher priv->rx_desc_count = 0; 1005adfc5217SJeff Kirsher priv->rx_dirty_desc = 0; 1006adfc5217SJeff Kirsher priv->rx_curr_desc = 0; 1007adfc5217SJeff Kirsher 1008adfc5217SJeff Kirsher /* initialize flow control buffer allocation */ 10093dc6475cSFlorian Fainelli if (priv->dma_has_sram) 1010adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 1011adfc5217SJeff Kirsher ENETDMA_BUFALLOC_REG(priv->rx_chan)); 10123dc6475cSFlorian Fainelli else 10133dc6475cSFlorian Fainelli enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 10143dc6475cSFlorian Fainelli ENETDMAC_BUFALLOC, priv->rx_chan); 1015adfc5217SJeff Kirsher 1016d27de0efSSieng Piaw Liew if (bcm_enet_refill_rx(dev, false)) { 1017d27de0efSSieng Piaw Liew dev_err(kdev, "cannot allocate rx buffer queue\n"); 1018adfc5217SJeff Kirsher ret = -ENOMEM; 1019adfc5217SJeff Kirsher goto out; 1020adfc5217SJeff Kirsher } 1021adfc5217SJeff Kirsher 1022adfc5217SJeff Kirsher /* write rx & tx ring addresses */ 10233dc6475cSFlorian Fainelli if (priv->dma_has_sram) { 10240ae99b5fSMaxime Bizon enet_dmas_writel(priv, priv->rx_desc_dma, 10253dc6475cSFlorian Fainelli ENETDMAS_RSTART_REG, priv->rx_chan); 10260ae99b5fSMaxime Bizon enet_dmas_writel(priv, priv->tx_desc_dma, 10273dc6475cSFlorian Fainelli ENETDMAS_RSTART_REG, priv->tx_chan); 10283dc6475cSFlorian Fainelli } else { 10293dc6475cSFlorian 
Fainelli enet_dmac_writel(priv, priv->rx_desc_dma, 10303dc6475cSFlorian Fainelli ENETDMAC_RSTART, priv->rx_chan); 10313dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->tx_desc_dma, 10323dc6475cSFlorian Fainelli ENETDMAC_RSTART, priv->tx_chan); 10333dc6475cSFlorian Fainelli } 1034adfc5217SJeff Kirsher 1035adfc5217SJeff Kirsher /* clear remaining state ram for rx & tx channel */ 10363dc6475cSFlorian Fainelli if (priv->dma_has_sram) { 10373dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); 10383dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); 10393dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); 10403dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); 10413dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); 10423dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); 10433dc6475cSFlorian Fainelli } else { 10443dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); 10453dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); 10463dc6475cSFlorian Fainelli } 1047adfc5217SJeff Kirsher 1048adfc5217SJeff Kirsher /* set max rx/tx length */ 1049adfc5217SJeff Kirsher enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); 1050adfc5217SJeff Kirsher enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); 1051adfc5217SJeff Kirsher 1052adfc5217SJeff Kirsher /* set dma maximum burst len */ 10536f00a022SMaxime Bizon enet_dmac_writel(priv, priv->dma_maxburst, 10543dc6475cSFlorian Fainelli ENETDMAC_MAXBURST, priv->rx_chan); 10556f00a022SMaxime Bizon enet_dmac_writel(priv, priv->dma_maxburst, 10563dc6475cSFlorian Fainelli ENETDMAC_MAXBURST, priv->tx_chan); 1057adfc5217SJeff Kirsher 1058adfc5217SJeff Kirsher /* set correct transmit fifo watermark */ 1059adfc5217SJeff Kirsher enet_writel(priv, BCMENET_TX_FIFO_TRESH, 
ENET_TXWMARK_REG); 1060adfc5217SJeff Kirsher 1061adfc5217SJeff Kirsher /* set flow control low/high threshold to 1/3 / 2/3 */ 10623dc6475cSFlorian Fainelli if (priv->dma_has_sram) { 1063adfc5217SJeff Kirsher val = priv->rx_ring_size / 3; 1064adfc5217SJeff Kirsher enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); 1065adfc5217SJeff Kirsher val = (priv->rx_ring_size * 2) / 3; 1066adfc5217SJeff Kirsher enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); 10673dc6475cSFlorian Fainelli } else { 10683dc6475cSFlorian Fainelli enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); 10693dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); 10703dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); 10713dc6475cSFlorian Fainelli } 1072adfc5217SJeff Kirsher 1073adfc5217SJeff Kirsher /* all set, enable mac and interrupts, start dma engine and 1074adfc5217SJeff Kirsher * kick rx dma channel */ 1075adfc5217SJeff Kirsher wmb(); 1076adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1077adfc5217SJeff Kirsher val |= ENET_CTL_ENABLE_MASK; 1078adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1079d6213c1fSJonas Gorski if (priv->dma_has_sram) 1080adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 10813dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_en_mask, 10823dc6475cSFlorian Fainelli ENETDMAC_CHANCFG, priv->rx_chan); 1083adfc5217SJeff Kirsher 1084adfc5217SJeff Kirsher /* watch "mib counters about to overflow" interrupt */ 1085adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 1086adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1087adfc5217SJeff Kirsher 1088adfc5217SJeff Kirsher /* watch "packet transferred" interrupt in rx and tx */ 10893dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 10903dc6475cSFlorian Fainelli ENETDMAC_IR, priv->rx_chan); 
10913dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 10923dc6475cSFlorian Fainelli ENETDMAC_IR, priv->tx_chan); 1093adfc5217SJeff Kirsher 1094adfc5217SJeff Kirsher /* make sure we enable napi before rx interrupt */ 1095adfc5217SJeff Kirsher napi_enable(&priv->napi); 1096adfc5217SJeff Kirsher 10973dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 10983dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->rx_chan); 10993dc6475cSFlorian Fainelli enet_dmac_writel(priv, priv->dma_chan_int_mask, 11003dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->tx_chan); 1101adfc5217SJeff Kirsher 1102df384d43SArnd Bergmann if (phydev) 1103625eb866SPhilippe Reynes phy_start(phydev); 1104adfc5217SJeff Kirsher else 1105adfc5217SJeff Kirsher bcm_enet_adjust_link(dev); 1106adfc5217SJeff Kirsher 1107adfc5217SJeff Kirsher netif_start_queue(dev); 1108adfc5217SJeff Kirsher return 0; 1109adfc5217SJeff Kirsher 1110adfc5217SJeff Kirsher out: 1111d27de0efSSieng Piaw Liew bcm_enet_free_rx_buf_ring(kdev, priv); 1112adfc5217SJeff Kirsher 1113adfc5217SJeff Kirsher out_free_tx_skb: 1114adfc5217SJeff Kirsher kfree(priv->tx_skb); 1115adfc5217SJeff Kirsher 1116adfc5217SJeff Kirsher out_free_tx_ring: 1117adfc5217SJeff Kirsher dma_free_coherent(kdev, priv->tx_desc_alloc_size, 1118adfc5217SJeff Kirsher priv->tx_desc_cpu, priv->tx_desc_dma); 1119adfc5217SJeff Kirsher 1120adfc5217SJeff Kirsher out_free_rx_ring: 1121adfc5217SJeff Kirsher dma_free_coherent(kdev, priv->rx_desc_alloc_size, 1122adfc5217SJeff Kirsher priv->rx_desc_cpu, priv->rx_desc_dma); 1123adfc5217SJeff Kirsher 1124adfc5217SJeff Kirsher out_freeirq_tx: 1125adfc5217SJeff Kirsher free_irq(priv->irq_tx, dev); 1126adfc5217SJeff Kirsher 1127adfc5217SJeff Kirsher out_freeirq_rx: 1128adfc5217SJeff Kirsher free_irq(priv->irq_rx, dev); 1129adfc5217SJeff Kirsher 1130adfc5217SJeff Kirsher out_freeirq: 1131adfc5217SJeff Kirsher free_irq(dev->irq, dev); 1132adfc5217SJeff Kirsher 1133adfc5217SJeff Kirsher 
out_phy_disconnect: 1134df384d43SArnd Bergmann if (phydev) 1135625eb866SPhilippe Reynes phy_disconnect(phydev); 1136adfc5217SJeff Kirsher 1137adfc5217SJeff Kirsher return ret; 1138adfc5217SJeff Kirsher } 1139adfc5217SJeff Kirsher 1140adfc5217SJeff Kirsher /* 1141adfc5217SJeff Kirsher * disable mac 1142adfc5217SJeff Kirsher */ 1143adfc5217SJeff Kirsher static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) 1144adfc5217SJeff Kirsher { 1145adfc5217SJeff Kirsher int limit; 1146adfc5217SJeff Kirsher u32 val; 1147adfc5217SJeff Kirsher 1148adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1149adfc5217SJeff Kirsher val |= ENET_CTL_DISABLE_MASK; 1150adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1151adfc5217SJeff Kirsher 1152adfc5217SJeff Kirsher limit = 1000; 1153adfc5217SJeff Kirsher do { 1154adfc5217SJeff Kirsher u32 val; 1155adfc5217SJeff Kirsher 1156adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1157adfc5217SJeff Kirsher if (!(val & ENET_CTL_DISABLE_MASK)) 1158adfc5217SJeff Kirsher break; 1159adfc5217SJeff Kirsher udelay(1); 1160adfc5217SJeff Kirsher } while (limit--); 1161adfc5217SJeff Kirsher } 1162adfc5217SJeff Kirsher 1163adfc5217SJeff Kirsher /* 1164adfc5217SJeff Kirsher * disable dma in given channel 1165adfc5217SJeff Kirsher */ 1166adfc5217SJeff Kirsher static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) 1167adfc5217SJeff Kirsher { 1168adfc5217SJeff Kirsher int limit; 1169adfc5217SJeff Kirsher 11703dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); 1171adfc5217SJeff Kirsher 1172adfc5217SJeff Kirsher limit = 1000; 1173adfc5217SJeff Kirsher do { 1174adfc5217SJeff Kirsher u32 val; 1175adfc5217SJeff Kirsher 11763dc6475cSFlorian Fainelli val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); 11770ae99b5fSMaxime Bizon if (!(val & ENETDMAC_CHANCFG_EN_MASK)) 1178adfc5217SJeff Kirsher break; 1179adfc5217SJeff Kirsher udelay(1); 1180adfc5217SJeff Kirsher } while (limit--); 
1181adfc5217SJeff Kirsher } 1182adfc5217SJeff Kirsher 1183adfc5217SJeff Kirsher /* 1184adfc5217SJeff Kirsher * stop callback 1185adfc5217SJeff Kirsher */ 1186adfc5217SJeff Kirsher static int bcm_enet_stop(struct net_device *dev) 1187adfc5217SJeff Kirsher { 1188adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1189adfc5217SJeff Kirsher struct device *kdev; 1190adfc5217SJeff Kirsher 1191adfc5217SJeff Kirsher priv = netdev_priv(dev); 1192adfc5217SJeff Kirsher kdev = &priv->pdev->dev; 1193adfc5217SJeff Kirsher 1194adfc5217SJeff Kirsher netif_stop_queue(dev); 1195adfc5217SJeff Kirsher napi_disable(&priv->napi); 1196adfc5217SJeff Kirsher if (priv->has_phy) 1197625eb866SPhilippe Reynes phy_stop(dev->phydev); 1198adfc5217SJeff Kirsher del_timer_sync(&priv->rx_timeout); 1199adfc5217SJeff Kirsher 1200adfc5217SJeff Kirsher /* mask all interrupts */ 1201adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_IRMASK_REG); 12023dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 12033dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 1204adfc5217SJeff Kirsher 1205adfc5217SJeff Kirsher /* make sure no mib update is scheduled */ 1206adfc5217SJeff Kirsher cancel_work_sync(&priv->mib_update_task); 1207adfc5217SJeff Kirsher 1208adfc5217SJeff Kirsher /* disable dma & mac */ 1209adfc5217SJeff Kirsher bcm_enet_disable_dma(priv, priv->tx_chan); 1210adfc5217SJeff Kirsher bcm_enet_disable_dma(priv, priv->rx_chan); 1211adfc5217SJeff Kirsher bcm_enet_disable_mac(priv); 1212adfc5217SJeff Kirsher 1213adfc5217SJeff Kirsher /* force reclaim of all tx buffers */ 1214adfc5217SJeff Kirsher bcm_enet_tx_reclaim(dev, 1); 1215adfc5217SJeff Kirsher 1216d27de0efSSieng Piaw Liew /* free the rx buffer ring */ 1217d27de0efSSieng Piaw Liew bcm_enet_free_rx_buf_ring(kdev, priv); 1218adfc5217SJeff Kirsher 1219adfc5217SJeff Kirsher /* free remaining allocated memory */ 1220adfc5217SJeff Kirsher kfree(priv->tx_skb); 1221adfc5217SJeff Kirsher 
dma_free_coherent(kdev, priv->rx_desc_alloc_size, 1222adfc5217SJeff Kirsher priv->rx_desc_cpu, priv->rx_desc_dma); 1223adfc5217SJeff Kirsher dma_free_coherent(kdev, priv->tx_desc_alloc_size, 1224adfc5217SJeff Kirsher priv->tx_desc_cpu, priv->tx_desc_dma); 1225adfc5217SJeff Kirsher free_irq(priv->irq_tx, dev); 1226adfc5217SJeff Kirsher free_irq(priv->irq_rx, dev); 1227adfc5217SJeff Kirsher free_irq(dev->irq, dev); 1228adfc5217SJeff Kirsher 1229adfc5217SJeff Kirsher /* release phy */ 1230625eb866SPhilippe Reynes if (priv->has_phy) 1231625eb866SPhilippe Reynes phy_disconnect(dev->phydev); 1232adfc5217SJeff Kirsher 12339bc1ef64SSieng Piaw Liew /* reset BQL after forced tx reclaim to prevent kernel panic */ 12349bc1ef64SSieng Piaw Liew netdev_reset_queue(dev); 12359bc1ef64SSieng Piaw Liew 1236adfc5217SJeff Kirsher return 0; 1237adfc5217SJeff Kirsher } 1238adfc5217SJeff Kirsher 1239adfc5217SJeff Kirsher /* 1240adfc5217SJeff Kirsher * ethtool callbacks 1241adfc5217SJeff Kirsher */ 1242adfc5217SJeff Kirsher struct bcm_enet_stats { 1243adfc5217SJeff Kirsher char stat_string[ETH_GSTRING_LEN]; 1244adfc5217SJeff Kirsher int sizeof_stat; 1245adfc5217SJeff Kirsher int stat_offset; 1246adfc5217SJeff Kirsher int mib_reg; 1247adfc5217SJeff Kirsher }; 1248adfc5217SJeff Kirsher 1249adfc5217SJeff Kirsher #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \ 1250adfc5217SJeff Kirsher offsetof(struct bcm_enet_priv, m) 1251adfc5217SJeff Kirsher #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \ 1252adfc5217SJeff Kirsher offsetof(struct net_device_stats, m) 1253adfc5217SJeff Kirsher 1254adfc5217SJeff Kirsher static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = { 1255adfc5217SJeff Kirsher { "rx_packets", DEV_STAT(rx_packets), -1 }, 1256adfc5217SJeff Kirsher { "tx_packets", DEV_STAT(tx_packets), -1 }, 1257adfc5217SJeff Kirsher { "rx_bytes", DEV_STAT(rx_bytes), -1 }, 1258adfc5217SJeff Kirsher { "tx_bytes", DEV_STAT(tx_bytes), -1 }, 1259adfc5217SJeff Kirsher { 
"rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	/* hardware MIB counters: third field is the MIB register index,
	 * accumulated into priv->mib by update_mib_counters() */
	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

/* MIB registers that back no exported statistic; they are still read
 * (and discarded) by update_mib_counters() so the self-clearing
 * hardware counters cannot overflow */
static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};


/* ethtool: report driver name and bus info */
static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	/* NOTE(review): strlcpy is deprecated in newer kernels in favour
	 * of strscpy — candidate cleanup, no functional difference here */
	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

/* ethtool: number of strings in the requested string set (stats only) */
static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

/* ethtool: copy the statistic names, ETH_GSTRING_LEN bytes apart */
static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

/*
 * Fold the hardware MIB registers into the software counters in priv.
 * The registers self-clear on read (RDCLEAR is set in
 * bcm_enet_hw_preinit()), so each read yields the delta since the
 * previous read and is accumulated with +=.
 * Caller must hold priv->mib_update_lock.
 */
static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		/* mib_reg == -1 marks netdev stats, not hardware MIB */
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

/* workqueue callback: refresh MIB counters outside interrupt context,
 * then re-enable the MIB interrupt if the device is still up */
static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

/* ethtool: export all counters; device stats come from netdev->stats,
 * hardware counters from the freshly-updated priv->mib copies */
static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

/* ethtool: restart autonegotiation — only possible with an attached PHY */
static int bcm_enet_nway_reset(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy)
		return phy_ethtool_nway_reset(dev);

	return -EOPNOTSUPP;
}

/* ethtool: report link settings; delegated to phylib when a PHY is
 * attached, otherwise synthesized from the forced speed/duplex flags */
static int bcm_enet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;
	u32 supported, advertising;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;

		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	} else {
		cmd->base.autoneg = 0;
		cmd->base.speed = (priv->force_speed_100) ?
			SPEED_100 : SPEED_10;
		cmd->base.duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		advertising = 0;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		cmd->base.port = PORT_MII;
	}
	return 0;
}

/* ethtool: change link settings; without a PHY only forced 10/100 on
 * the MII port is accepted */
static int bcm_enet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_ethtool_ksettings_set(dev->phydev, cmd);
	} else {

		if (cmd->base.autoneg ||
		    (cmd->base.speed != SPEED_100 &&
		     cmd->base.speed != SPEED_10) ||
		    cmd->base.port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 =
			(cmd->base.speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full =
			(cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

/* ethtool: report current and maximum ring sizes */
static void
bcm_enet_get_ringparam(struct net_device *dev,
		       struct ethtool_ringparam *ering,
		       struct kernel_ethtool_ringparam *kernel_ering,
		       struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

/* ethtool: resize the rings; requires a stop/start cycle when the
 * interface is up since the rings are allocated in bcm_enet_open() */
static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering,
				  struct kernel_ethtool_ringparam *kernel_ering,
				  struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		/* if reopening fails the device is closed rather than
		 * left half-initialized */
		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

/* ethtool: report pause (flow-control) configuration */
static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

/* ethtool: validate and store pause configuration; applied to the
 * hardware elsewhere (flags only are updated here) */
static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats      = bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
	.get_link_ksettings	= bcm_enet_get_link_ksettings,
	.set_link_ksettings	= bcm_enet_set_link_ksettings,
};

/* SIOCxMIIxxx ioctls: forwarded to phylib when a PHY is attached,
 * otherwise served through a throwaway mii_if_info on phy address 0 */
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(dev->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);

	/* total per-fragment allocation: headroom + buffer + shinfo */
	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
					    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	/* busy-wait (up to ~1ms) for the self-clearing reset bit */
	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_eth_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	int irq, irq_rx, irq_tx;
	struct mii_bus *bus;
	int i, ret;

	/* shared DMA registers are mapped by a sibling driver; defer
	 * until that has happened */
	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	irq = platform_get_irq(pdev, 0);
	irq_rx = platform_get_irq(pdev, 1);
	irq_tx = platform_get_irq(pdev, 2);
	if (irq < 0 || irq_rx < 0 || irq_tx < 0)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD;

	/* compute hw_mtu / rx buffer sizes for the default MTU */
	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	dev->irq = priv->irq = irq;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;

	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		eth_hw_addr_set(dev, pd->mac_addr);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_desc_shift = pd->dma_desc_shift;
		priv->rx_chan = pd->rx_chan;
		priv->tx_chan = pd->tx_chan;
	}

	if (priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_disable_clk_mac;
		}
		ret = clk_prepare_enable(priv->phy_clk);
		if (ret)
			goto out_disable_clk_mac;
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {

		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation return 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {

		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);

	dev->ethtool_ops = &bcm_enet_ethtool_ops;
	/* MTU range: 46 - 2028 */
	dev->min_mtu = ETH_ZLEN - ETH_HLEN;
	dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

	/* error unwind: each label undoes the steps completed before the
	 * failure, in reverse order */
out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	clk_disable_unprepare(priv->phy_clk);

out_disable_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}


/*
 * exit func, stops hardware and unregisters netdevice
 */
static int bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = dev_get_platdata(&pdev->dev);
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* disable hw block clocks */
	clk_disable_unprepare(priv->phy_clk);
	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner  = THIS_MODULE,
	},
};

/*
 * switch mii access callbacks
 */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	/* fixed wait for the MDIO transaction instead of polling a
	 * completion bit — presumably long enough for the configured
	 * MDC frequency */
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

/* ports numbered from ENETSW_RGMII_PORT0 upwards are RGMII ports */
static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		/* 0xffff means no PHY responded at this address */
		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
		/* skip ports whose link state did not change */
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ?
1 : 0; 20576f00a022SMaxime Bizon 20586f00a022SMaxime Bizon if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) 20596f00a022SMaxime Bizon speed = 100; 20606f00a022SMaxime Bizon else 20616f00a022SMaxime Bizon speed = 10; 2062aebd9947SSimon Arlott 2063aebd9947SSimon Arlott if (val & BMSR_ESTATEN) { 2064aebd9947SSimon Arlott advertise = bcmenet_sw_mdio_read(priv, external_phy, 2065aebd9947SSimon Arlott port->phy_id, MII_CTRL1000); 2066aebd9947SSimon Arlott 2067aebd9947SSimon Arlott lpa = bcmenet_sw_mdio_read(priv, external_phy, 2068aebd9947SSimon Arlott port->phy_id, MII_STAT1000); 2069aebd9947SSimon Arlott 2070aebd9947SSimon Arlott if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF) 2071aebd9947SSimon Arlott && lpa & (LPA_1000FULL | LPA_1000HALF)) { 2072aebd9947SSimon Arlott speed = 1000; 2073aebd9947SSimon Arlott duplex = (lpa & LPA_1000FULL); 2074aebd9947SSimon Arlott } 20756f00a022SMaxime Bizon } 20766f00a022SMaxime Bizon 20776f00a022SMaxime Bizon dev_info(&priv->pdev->dev, 20786f00a022SMaxime Bizon "link UP on %s, %dMbps, %s-duplex\n", 20796f00a022SMaxime Bizon port->name, speed, duplex ? 
"full" : "half"); 20806f00a022SMaxime Bizon 20816f00a022SMaxime Bizon override = ENETSW_PORTOV_ENABLE_MASK | 20826f00a022SMaxime Bizon ENETSW_PORTOV_LINKUP_MASK; 20836f00a022SMaxime Bizon 20846f00a022SMaxime Bizon if (speed == 1000) 20856f00a022SMaxime Bizon override |= ENETSW_IMPOV_1000_MASK; 20866f00a022SMaxime Bizon else if (speed == 100) 20876f00a022SMaxime Bizon override |= ENETSW_IMPOV_100_MASK; 20886f00a022SMaxime Bizon if (duplex) 20896f00a022SMaxime Bizon override |= ENETSW_IMPOV_FDX_MASK; 20906f00a022SMaxime Bizon 20916f00a022SMaxime Bizon enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); 20926f00a022SMaxime Bizon enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); 20936f00a022SMaxime Bizon } 20946f00a022SMaxime Bizon 20956f00a022SMaxime Bizon priv->swphy_poll.expires = jiffies + HZ; 20966f00a022SMaxime Bizon add_timer(&priv->swphy_poll); 20976f00a022SMaxime Bizon } 20986f00a022SMaxime Bizon 20996f00a022SMaxime Bizon /* 21006f00a022SMaxime Bizon * open callback, allocate dma rings & buffers and start rx operation 21016f00a022SMaxime Bizon */ 21026f00a022SMaxime Bizon static int bcm_enetsw_open(struct net_device *dev) 21036f00a022SMaxime Bizon { 21046f00a022SMaxime Bizon struct bcm_enet_priv *priv; 21056f00a022SMaxime Bizon struct device *kdev; 21066f00a022SMaxime Bizon int i, ret; 21076f00a022SMaxime Bizon unsigned int size; 21086f00a022SMaxime Bizon void *p; 21096f00a022SMaxime Bizon u32 val; 21106f00a022SMaxime Bizon 21116f00a022SMaxime Bizon priv = netdev_priv(dev); 21126f00a022SMaxime Bizon kdev = &priv->pdev->dev; 21136f00a022SMaxime Bizon 21146f00a022SMaxime Bizon /* mask all interrupts and request them */ 21153dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 21163dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 21176f00a022SMaxime Bizon 21186f00a022SMaxime Bizon ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 2119df9f1b9fSMichael Opdenacker 0, dev->name, dev); 21206f00a022SMaxime 
Bizon if (ret) 21216f00a022SMaxime Bizon goto out_freeirq; 21226f00a022SMaxime Bizon 21236f00a022SMaxime Bizon if (priv->irq_tx != -1) { 21246f00a022SMaxime Bizon ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 2125df9f1b9fSMichael Opdenacker 0, dev->name, dev); 21266f00a022SMaxime Bizon if (ret) 21276f00a022SMaxime Bizon goto out_freeirq_rx; 21286f00a022SMaxime Bizon } 21296f00a022SMaxime Bizon 21306f00a022SMaxime Bizon /* allocate rx dma ring */ 21316f00a022SMaxime Bizon size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 2132750afb08SLuis Chamberlain p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 21336f00a022SMaxime Bizon if (!p) { 21346f00a022SMaxime Bizon dev_err(kdev, "cannot allocate rx ring %u\n", size); 21356f00a022SMaxime Bizon ret = -ENOMEM; 21366f00a022SMaxime Bizon goto out_freeirq_tx; 21376f00a022SMaxime Bizon } 21386f00a022SMaxime Bizon 21396f00a022SMaxime Bizon priv->rx_desc_alloc_size = size; 21406f00a022SMaxime Bizon priv->rx_desc_cpu = p; 21416f00a022SMaxime Bizon 21426f00a022SMaxime Bizon /* allocate tx dma ring */ 21436f00a022SMaxime Bizon size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 2144750afb08SLuis Chamberlain p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 21456f00a022SMaxime Bizon if (!p) { 21466f00a022SMaxime Bizon dev_err(kdev, "cannot allocate tx ring\n"); 21476f00a022SMaxime Bizon ret = -ENOMEM; 21486f00a022SMaxime Bizon goto out_free_rx_ring; 21496f00a022SMaxime Bizon } 21506f00a022SMaxime Bizon 21516f00a022SMaxime Bizon priv->tx_desc_alloc_size = size; 21526f00a022SMaxime Bizon priv->tx_desc_cpu = p; 21536f00a022SMaxime Bizon 21546396bb22SKees Cook priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), 21556f00a022SMaxime Bizon GFP_KERNEL); 21566f00a022SMaxime Bizon if (!priv->tx_skb) { 2157d27de0efSSieng Piaw Liew dev_err(kdev, "cannot allocate tx skb queue\n"); 21586f00a022SMaxime Bizon ret = -ENOMEM; 21596f00a022SMaxime Bizon goto out_free_tx_ring; 
21606f00a022SMaxime Bizon } 21616f00a022SMaxime Bizon 21626f00a022SMaxime Bizon priv->tx_desc_count = priv->tx_ring_size; 21636f00a022SMaxime Bizon priv->tx_dirty_desc = 0; 21646f00a022SMaxime Bizon priv->tx_curr_desc = 0; 21656f00a022SMaxime Bizon spin_lock_init(&priv->tx_lock); 21666f00a022SMaxime Bizon 2167d27de0efSSieng Piaw Liew /* init & fill rx ring with buffers */ 2168d27de0efSSieng Piaw Liew priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *), 21696f00a022SMaxime Bizon GFP_KERNEL); 2170d27de0efSSieng Piaw Liew if (!priv->rx_buf) { 2171d27de0efSSieng Piaw Liew dev_err(kdev, "cannot allocate rx buffer queue\n"); 21726f00a022SMaxime Bizon ret = -ENOMEM; 21736f00a022SMaxime Bizon goto out_free_tx_skb; 21746f00a022SMaxime Bizon } 21756f00a022SMaxime Bizon 21766f00a022SMaxime Bizon priv->rx_desc_count = 0; 21776f00a022SMaxime Bizon priv->rx_dirty_desc = 0; 21786f00a022SMaxime Bizon priv->rx_curr_desc = 0; 21796f00a022SMaxime Bizon 21806f00a022SMaxime Bizon /* disable all ports */ 21816f00a022SMaxime Bizon for (i = 0; i < priv->num_ports; i++) { 21826f00a022SMaxime Bizon enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, 21836f00a022SMaxime Bizon ENETSW_PORTOV_REG(i)); 21846f00a022SMaxime Bizon enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | 21856f00a022SMaxime Bizon ENETSW_PTCTRL_TXDIS_MASK, 21866f00a022SMaxime Bizon ENETSW_PTCTRL_REG(i)); 21876f00a022SMaxime Bizon 21886f00a022SMaxime Bizon priv->sw_port_link[i] = 0; 21896f00a022SMaxime Bizon } 21906f00a022SMaxime Bizon 21916f00a022SMaxime Bizon /* reset mib */ 21926f00a022SMaxime Bizon val = enetsw_readb(priv, ENETSW_GMCR_REG); 21936f00a022SMaxime Bizon val |= ENETSW_GMCR_RST_MIB_MASK; 21946f00a022SMaxime Bizon enetsw_writeb(priv, val, ENETSW_GMCR_REG); 21956f00a022SMaxime Bizon mdelay(1); 21966f00a022SMaxime Bizon val &= ~ENETSW_GMCR_RST_MIB_MASK; 21976f00a022SMaxime Bizon enetsw_writeb(priv, val, ENETSW_GMCR_REG); 21986f00a022SMaxime Bizon mdelay(1); 21996f00a022SMaxime Bizon 22006f00a022SMaxime Bizon /* 
force CPU port state */ 22016f00a022SMaxime Bizon val = enetsw_readb(priv, ENETSW_IMPOV_REG); 22026f00a022SMaxime Bizon val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK; 22036f00a022SMaxime Bizon enetsw_writeb(priv, val, ENETSW_IMPOV_REG); 22046f00a022SMaxime Bizon 22056f00a022SMaxime Bizon /* enable switch forward engine */ 22066f00a022SMaxime Bizon val = enetsw_readb(priv, ENETSW_SWMODE_REG); 22076f00a022SMaxime Bizon val |= ENETSW_SWMODE_FWD_EN_MASK; 22086f00a022SMaxime Bizon enetsw_writeb(priv, val, ENETSW_SWMODE_REG); 22096f00a022SMaxime Bizon 22106f00a022SMaxime Bizon /* enable jumbo on all ports */ 22116f00a022SMaxime Bizon enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG); 22126f00a022SMaxime Bizon enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG); 22136f00a022SMaxime Bizon 22146f00a022SMaxime Bizon /* initialize flow control buffer allocation */ 22156f00a022SMaxime Bizon enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 22166f00a022SMaxime Bizon ENETDMA_BUFALLOC_REG(priv->rx_chan)); 22176f00a022SMaxime Bizon 2218d27de0efSSieng Piaw Liew if (bcm_enet_refill_rx(dev, false)) { 2219d27de0efSSieng Piaw Liew dev_err(kdev, "cannot allocate rx buffer queue\n"); 22206f00a022SMaxime Bizon ret = -ENOMEM; 22216f00a022SMaxime Bizon goto out; 22226f00a022SMaxime Bizon } 22236f00a022SMaxime Bizon 22246f00a022SMaxime Bizon /* write rx & tx ring addresses */ 22256f00a022SMaxime Bizon enet_dmas_writel(priv, priv->rx_desc_dma, 22263dc6475cSFlorian Fainelli ENETDMAS_RSTART_REG, priv->rx_chan); 22276f00a022SMaxime Bizon enet_dmas_writel(priv, priv->tx_desc_dma, 22283dc6475cSFlorian Fainelli ENETDMAS_RSTART_REG, priv->tx_chan); 22296f00a022SMaxime Bizon 22306f00a022SMaxime Bizon /* clear remaining state ram for rx & tx channel */ 22313dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); 22323dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); 22333dc6475cSFlorian Fainelli 
enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); 22343dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); 22353dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); 22363dc6475cSFlorian Fainelli enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); 22376f00a022SMaxime Bizon 22386f00a022SMaxime Bizon /* set dma maximum burst len */ 22396f00a022SMaxime Bizon enet_dmac_writel(priv, priv->dma_maxburst, 22403dc6475cSFlorian Fainelli ENETDMAC_MAXBURST, priv->rx_chan); 22416f00a022SMaxime Bizon enet_dmac_writel(priv, priv->dma_maxburst, 22423dc6475cSFlorian Fainelli ENETDMAC_MAXBURST, priv->tx_chan); 22436f00a022SMaxime Bizon 22446f00a022SMaxime Bizon /* set flow control low/high threshold to 1/3 / 2/3 */ 22456f00a022SMaxime Bizon val = priv->rx_ring_size / 3; 22466f00a022SMaxime Bizon enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); 22476f00a022SMaxime Bizon val = (priv->rx_ring_size * 2) / 3; 22486f00a022SMaxime Bizon enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); 22496f00a022SMaxime Bizon 22506f00a022SMaxime Bizon /* all set, enable mac and interrupts, start dma engine and 22516f00a022SMaxime Bizon * kick rx dma channel 22526f00a022SMaxime Bizon */ 22536f00a022SMaxime Bizon wmb(); 22546f00a022SMaxime Bizon enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 22556f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, 22563dc6475cSFlorian Fainelli ENETDMAC_CHANCFG, priv->rx_chan); 22576f00a022SMaxime Bizon 22586f00a022SMaxime Bizon /* watch "packet transferred" interrupt in rx and tx */ 22596f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 22603dc6475cSFlorian Fainelli ENETDMAC_IR, priv->rx_chan); 22616f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 22623dc6475cSFlorian Fainelli ENETDMAC_IR, priv->tx_chan); 22636f00a022SMaxime Bizon 22646f00a022SMaxime Bizon /* make sure we enable napi 
before rx interrupt */ 22656f00a022SMaxime Bizon napi_enable(&priv->napi); 22666f00a022SMaxime Bizon 22676f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 22683dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->rx_chan); 22696f00a022SMaxime Bizon enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 22703dc6475cSFlorian Fainelli ENETDMAC_IRMASK, priv->tx_chan); 22716f00a022SMaxime Bizon 22726f00a022SMaxime Bizon netif_carrier_on(dev); 22736f00a022SMaxime Bizon netif_start_queue(dev); 22746f00a022SMaxime Bizon 22756f00a022SMaxime Bizon /* apply override config for bypass_link ports here. */ 22766f00a022SMaxime Bizon for (i = 0; i < priv->num_ports; i++) { 22776f00a022SMaxime Bizon struct bcm63xx_enetsw_port *port; 22786f00a022SMaxime Bizon u8 override; 22796f00a022SMaxime Bizon port = &priv->used_ports[i]; 22806f00a022SMaxime Bizon if (!port->used) 22816f00a022SMaxime Bizon continue; 22826f00a022SMaxime Bizon 22836f00a022SMaxime Bizon if (!port->bypass_link) 22846f00a022SMaxime Bizon continue; 22856f00a022SMaxime Bizon 22866f00a022SMaxime Bizon override = ENETSW_PORTOV_ENABLE_MASK | 22876f00a022SMaxime Bizon ENETSW_PORTOV_LINKUP_MASK; 22886f00a022SMaxime Bizon 22896f00a022SMaxime Bizon switch (port->force_speed) { 22906f00a022SMaxime Bizon case 1000: 22916f00a022SMaxime Bizon override |= ENETSW_IMPOV_1000_MASK; 22926f00a022SMaxime Bizon break; 22936f00a022SMaxime Bizon case 100: 22946f00a022SMaxime Bizon override |= ENETSW_IMPOV_100_MASK; 22956f00a022SMaxime Bizon break; 22966f00a022SMaxime Bizon case 10: 22976f00a022SMaxime Bizon break; 22986f00a022SMaxime Bizon default: 22996f00a022SMaxime Bizon pr_warn("invalid forced speed on port %s: assume 10\n", 23006f00a022SMaxime Bizon port->name); 23016f00a022SMaxime Bizon break; 23026f00a022SMaxime Bizon } 23036f00a022SMaxime Bizon 23046f00a022SMaxime Bizon if (port->force_duplex_full) 23056f00a022SMaxime Bizon override |= ENETSW_IMPOV_FDX_MASK; 23066f00a022SMaxime Bizon 23076f00a022SMaxime Bizon 
23086f00a022SMaxime Bizon enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); 23096f00a022SMaxime Bizon enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); 23106f00a022SMaxime Bizon } 23116f00a022SMaxime Bizon 23126f00a022SMaxime Bizon /* start phy polling timer */ 2313eb8c6b5bSKees Cook timer_setup(&priv->swphy_poll, swphy_poll_timer, 0); 23143bd3b9edSHimanshu Jha mod_timer(&priv->swphy_poll, jiffies); 23156f00a022SMaxime Bizon return 0; 23166f00a022SMaxime Bizon 23176f00a022SMaxime Bizon out: 2318d27de0efSSieng Piaw Liew bcm_enet_free_rx_buf_ring(kdev, priv); 23196f00a022SMaxime Bizon 23206f00a022SMaxime Bizon out_free_tx_skb: 23216f00a022SMaxime Bizon kfree(priv->tx_skb); 23226f00a022SMaxime Bizon 23236f00a022SMaxime Bizon out_free_tx_ring: 23246f00a022SMaxime Bizon dma_free_coherent(kdev, priv->tx_desc_alloc_size, 23256f00a022SMaxime Bizon priv->tx_desc_cpu, priv->tx_desc_dma); 23266f00a022SMaxime Bizon 23276f00a022SMaxime Bizon out_free_rx_ring: 23286f00a022SMaxime Bizon dma_free_coherent(kdev, priv->rx_desc_alloc_size, 23296f00a022SMaxime Bizon priv->rx_desc_cpu, priv->rx_desc_dma); 23306f00a022SMaxime Bizon 23316f00a022SMaxime Bizon out_freeirq_tx: 23326f00a022SMaxime Bizon if (priv->irq_tx != -1) 23336f00a022SMaxime Bizon free_irq(priv->irq_tx, dev); 23346f00a022SMaxime Bizon 23356f00a022SMaxime Bizon out_freeirq_rx: 23366f00a022SMaxime Bizon free_irq(priv->irq_rx, dev); 23376f00a022SMaxime Bizon 23386f00a022SMaxime Bizon out_freeirq: 23396f00a022SMaxime Bizon return ret; 23406f00a022SMaxime Bizon } 23416f00a022SMaxime Bizon 23426f00a022SMaxime Bizon /* stop callback */ 23436f00a022SMaxime Bizon static int bcm_enetsw_stop(struct net_device *dev) 23446f00a022SMaxime Bizon { 23456f00a022SMaxime Bizon struct bcm_enet_priv *priv; 23466f00a022SMaxime Bizon struct device *kdev; 23476f00a022SMaxime Bizon 23486f00a022SMaxime Bizon priv = netdev_priv(dev); 23496f00a022SMaxime Bizon kdev = &priv->pdev->dev; 23506f00a022SMaxime Bizon 23516f00a022SMaxime Bizon 
del_timer_sync(&priv->swphy_poll); 23526f00a022SMaxime Bizon netif_stop_queue(dev); 23536f00a022SMaxime Bizon napi_disable(&priv->napi); 23546f00a022SMaxime Bizon del_timer_sync(&priv->rx_timeout); 23556f00a022SMaxime Bizon 23566f00a022SMaxime Bizon /* mask all interrupts */ 23573dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 23583dc6475cSFlorian Fainelli enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 23596f00a022SMaxime Bizon 23606f00a022SMaxime Bizon /* disable dma & mac */ 23616f00a022SMaxime Bizon bcm_enet_disable_dma(priv, priv->tx_chan); 23626f00a022SMaxime Bizon bcm_enet_disable_dma(priv, priv->rx_chan); 23636f00a022SMaxime Bizon 23646f00a022SMaxime Bizon /* force reclaim of all tx buffers */ 23656f00a022SMaxime Bizon bcm_enet_tx_reclaim(dev, 1); 23666f00a022SMaxime Bizon 2367d27de0efSSieng Piaw Liew /* free the rx buffer ring */ 2368d27de0efSSieng Piaw Liew bcm_enet_free_rx_buf_ring(kdev, priv); 23696f00a022SMaxime Bizon 23706f00a022SMaxime Bizon /* free remaining allocated memory */ 23716f00a022SMaxime Bizon kfree(priv->tx_skb); 23726f00a022SMaxime Bizon dma_free_coherent(kdev, priv->rx_desc_alloc_size, 23736f00a022SMaxime Bizon priv->rx_desc_cpu, priv->rx_desc_dma); 23746f00a022SMaxime Bizon dma_free_coherent(kdev, priv->tx_desc_alloc_size, 23756f00a022SMaxime Bizon priv->tx_desc_cpu, priv->tx_desc_dma); 23766f00a022SMaxime Bizon if (priv->irq_tx != -1) 23776f00a022SMaxime Bizon free_irq(priv->irq_tx, dev); 23786f00a022SMaxime Bizon free_irq(priv->irq_rx, dev); 23796f00a022SMaxime Bizon 23809bc1ef64SSieng Piaw Liew /* reset BQL after forced tx reclaim to prevent kernel panic */ 23819bc1ef64SSieng Piaw Liew netdev_reset_queue(dev); 23829bc1ef64SSieng Piaw Liew 23836f00a022SMaxime Bizon return 0; 23846f00a022SMaxime Bizon } 23856f00a022SMaxime Bizon 23866f00a022SMaxime Bizon /* try to sort out phy external status by walking the used_port field 23876f00a022SMaxime Bizon * in the bcm_enet_priv structure. 
in case the phy address is not 23886f00a022SMaxime Bizon * assigned to any physical port on the switch, assume it is external 23896f00a022SMaxime Bizon * (and yell at the user). 23906f00a022SMaxime Bizon */ 23916f00a022SMaxime Bizon static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id) 23926f00a022SMaxime Bizon { 23936f00a022SMaxime Bizon int i; 23946f00a022SMaxime Bizon 23956f00a022SMaxime Bizon for (i = 0; i < priv->num_ports; ++i) { 23966f00a022SMaxime Bizon if (!priv->used_ports[i].used) 23976f00a022SMaxime Bizon continue; 23986f00a022SMaxime Bizon if (priv->used_ports[i].phy_id == phy_id) 23996f00a022SMaxime Bizon return bcm_enet_port_is_rgmii(i); 24006f00a022SMaxime Bizon } 24016f00a022SMaxime Bizon 24026f00a022SMaxime Bizon printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n", 24036f00a022SMaxime Bizon phy_id); 24046f00a022SMaxime Bizon return 1; 24056f00a022SMaxime Bizon } 24066f00a022SMaxime Bizon 24076f00a022SMaxime Bizon /* can't use bcmenet_sw_mdio_read directly as we need to sort out 24086f00a022SMaxime Bizon * external/internal status of the given phy_id first. 24096f00a022SMaxime Bizon */ 24106f00a022SMaxime Bizon static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id, 24116f00a022SMaxime Bizon int location) 24126f00a022SMaxime Bizon { 24136f00a022SMaxime Bizon struct bcm_enet_priv *priv; 24146f00a022SMaxime Bizon 24156f00a022SMaxime Bizon priv = netdev_priv(dev); 24166f00a022SMaxime Bizon return bcmenet_sw_mdio_read(priv, 24176f00a022SMaxime Bizon bcm_enetsw_phy_is_external(priv, phy_id), 24186f00a022SMaxime Bizon phy_id, location); 24196f00a022SMaxime Bizon } 24206f00a022SMaxime Bizon 24216f00a022SMaxime Bizon /* can't use bcmenet_sw_mdio_write directly as we need to sort out 24226f00a022SMaxime Bizon * external/internal status of the given phy_id first. 
24236f00a022SMaxime Bizon */ 24246f00a022SMaxime Bizon static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id, 24256f00a022SMaxime Bizon int location, 24266f00a022SMaxime Bizon int val) 24276f00a022SMaxime Bizon { 24286f00a022SMaxime Bizon struct bcm_enet_priv *priv; 24296f00a022SMaxime Bizon 24306f00a022SMaxime Bizon priv = netdev_priv(dev); 24316f00a022SMaxime Bizon bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id), 24326f00a022SMaxime Bizon phy_id, location, val); 24336f00a022SMaxime Bizon } 24346f00a022SMaxime Bizon 24356f00a022SMaxime Bizon static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 24366f00a022SMaxime Bizon { 24376f00a022SMaxime Bizon struct mii_if_info mii; 24386f00a022SMaxime Bizon 24396f00a022SMaxime Bizon mii.dev = dev; 24406f00a022SMaxime Bizon mii.mdio_read = bcm_enetsw_mii_mdio_read; 24416f00a022SMaxime Bizon mii.mdio_write = bcm_enetsw_mii_mdio_write; 24426f00a022SMaxime Bizon mii.phy_id = 0; 24436f00a022SMaxime Bizon mii.phy_id_mask = 0x3f; 24446f00a022SMaxime Bizon mii.reg_num_mask = 0x1f; 24456f00a022SMaxime Bizon return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); 24466f00a022SMaxime Bizon 24476f00a022SMaxime Bizon } 24486f00a022SMaxime Bizon 24496f00a022SMaxime Bizon static const struct net_device_ops bcm_enetsw_ops = { 24506f00a022SMaxime Bizon .ndo_open = bcm_enetsw_open, 24516f00a022SMaxime Bizon .ndo_stop = bcm_enetsw_stop, 24526f00a022SMaxime Bizon .ndo_start_xmit = bcm_enet_start_xmit, 24536f00a022SMaxime Bizon .ndo_change_mtu = bcm_enet_change_mtu, 2454a7605370SArnd Bergmann .ndo_eth_ioctl = bcm_enetsw_ioctl, 24556f00a022SMaxime Bizon }; 24566f00a022SMaxime Bizon 24576f00a022SMaxime Bizon 24586f00a022SMaxime Bizon static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = { 24596f00a022SMaxime Bizon { "rx_packets", DEV_STAT(rx_packets), -1 }, 24606f00a022SMaxime Bizon { "tx_packets", DEV_STAT(tx_packets), -1 }, 24616f00a022SMaxime Bizon { "rx_bytes", 
DEV_STAT(rx_bytes), -1 }, 24626f00a022SMaxime Bizon { "tx_bytes", DEV_STAT(tx_bytes), -1 }, 24636f00a022SMaxime Bizon { "rx_errors", DEV_STAT(rx_errors), -1 }, 24646f00a022SMaxime Bizon { "tx_errors", DEV_STAT(tx_errors), -1 }, 24656f00a022SMaxime Bizon { "rx_dropped", DEV_STAT(rx_dropped), -1 }, 24666f00a022SMaxime Bizon { "tx_dropped", DEV_STAT(tx_dropped), -1 }, 24676f00a022SMaxime Bizon 24686f00a022SMaxime Bizon { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT }, 24696f00a022SMaxime Bizon { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST }, 24706f00a022SMaxime Bizon { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST }, 24716f00a022SMaxime Bizon { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT }, 24726f00a022SMaxime Bizon { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 }, 24736f00a022SMaxime Bizon { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 }, 24746f00a022SMaxime Bizon { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 }, 24756f00a022SMaxime Bizon { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 }, 24766f00a022SMaxime Bizon { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023}, 24776f00a022SMaxime Bizon { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max), 24786f00a022SMaxime Bizon ETHSW_MIB_RX_1024_1522 }, 24796f00a022SMaxime Bizon { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047), 24806f00a022SMaxime Bizon ETHSW_MIB_RX_1523_2047 }, 24816f00a022SMaxime Bizon { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095), 24826f00a022SMaxime Bizon ETHSW_MIB_RX_2048_4095 }, 24836f00a022SMaxime Bizon { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191), 24846f00a022SMaxime Bizon ETHSW_MIB_RX_4096_8191 }, 24856f00a022SMaxime Bizon { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728), 24866f00a022SMaxime Bizon ETHSW_MIB_RX_8192_9728 }, 24876f00a022SMaxime Bizon { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, 24886f00a022SMaxime Bizon { 
"tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, 24896f00a022SMaxime Bizon { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, 24906f00a022SMaxime Bizon { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, 24916f00a022SMaxime Bizon { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE }, 24926f00a022SMaxime Bizon 24936f00a022SMaxime Bizon { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, 24946f00a022SMaxime Bizon { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, 24956f00a022SMaxime Bizon { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, 24966f00a022SMaxime Bizon { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, 24976f00a022SMaxime Bizon { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, 24986f00a022SMaxime Bizon { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, 24996f00a022SMaxime Bizon 25006f00a022SMaxime Bizon }; 25016f00a022SMaxime Bizon 25026f00a022SMaxime Bizon #define BCM_ENETSW_STATS_LEN \ 25036f00a022SMaxime Bizon (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats)) 25046f00a022SMaxime Bizon 25056f00a022SMaxime Bizon static void bcm_enetsw_get_strings(struct net_device *netdev, 25066f00a022SMaxime Bizon u32 stringset, u8 *data) 25076f00a022SMaxime Bizon { 25086f00a022SMaxime Bizon int i; 25096f00a022SMaxime Bizon 25106f00a022SMaxime Bizon switch (stringset) { 25116f00a022SMaxime Bizon case ETH_SS_STATS: 25126f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 25136f00a022SMaxime Bizon memcpy(data + i * ETH_GSTRING_LEN, 25146f00a022SMaxime Bizon bcm_enetsw_gstrings_stats[i].stat_string, 25156f00a022SMaxime Bizon ETH_GSTRING_LEN); 25166f00a022SMaxime Bizon } 25176f00a022SMaxime Bizon break; 25186f00a022SMaxime Bizon } 25196f00a022SMaxime Bizon } 25206f00a022SMaxime Bizon 25216f00a022SMaxime Bizon static int bcm_enetsw_get_sset_count(struct net_device *netdev, 25226f00a022SMaxime Bizon int string_set) 
25236f00a022SMaxime Bizon { 25246f00a022SMaxime Bizon switch (string_set) { 25256f00a022SMaxime Bizon case ETH_SS_STATS: 25266f00a022SMaxime Bizon return BCM_ENETSW_STATS_LEN; 25276f00a022SMaxime Bizon default: 25286f00a022SMaxime Bizon return -EINVAL; 25296f00a022SMaxime Bizon } 25306f00a022SMaxime Bizon } 25316f00a022SMaxime Bizon 25326f00a022SMaxime Bizon static void bcm_enetsw_get_drvinfo(struct net_device *netdev, 25336f00a022SMaxime Bizon struct ethtool_drvinfo *drvinfo) 25346f00a022SMaxime Bizon { 2535e3c0a635SLeon Romanovsky strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); 2536e3c0a635SLeon Romanovsky strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); 25376f00a022SMaxime Bizon } 25386f00a022SMaxime Bizon 25396f00a022SMaxime Bizon static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, 25406f00a022SMaxime Bizon struct ethtool_stats *stats, 25416f00a022SMaxime Bizon u64 *data) 25426f00a022SMaxime Bizon { 25436f00a022SMaxime Bizon struct bcm_enet_priv *priv; 25446f00a022SMaxime Bizon int i; 25456f00a022SMaxime Bizon 25466f00a022SMaxime Bizon priv = netdev_priv(netdev); 25476f00a022SMaxime Bizon 25486f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 25496f00a022SMaxime Bizon const struct bcm_enet_stats *s; 25506f00a022SMaxime Bizon u32 lo, hi; 25516f00a022SMaxime Bizon char *p; 25526f00a022SMaxime Bizon int reg; 25536f00a022SMaxime Bizon 25546f00a022SMaxime Bizon s = &bcm_enetsw_gstrings_stats[i]; 25556f00a022SMaxime Bizon 25566f00a022SMaxime Bizon reg = s->mib_reg; 25576f00a022SMaxime Bizon if (reg == -1) 25586f00a022SMaxime Bizon continue; 25596f00a022SMaxime Bizon 25606f00a022SMaxime Bizon lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); 25616f00a022SMaxime Bizon p = (char *)priv + s->stat_offset; 25626f00a022SMaxime Bizon 25636f00a022SMaxime Bizon if (s->sizeof_stat == sizeof(u64)) { 25646f00a022SMaxime Bizon hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); 25656f00a022SMaxime Bizon *(u64 *)p 
= ((u64)hi << 32 | lo); 25666f00a022SMaxime Bizon } else { 25676f00a022SMaxime Bizon *(u32 *)p = lo; 25686f00a022SMaxime Bizon } 25696f00a022SMaxime Bizon } 25706f00a022SMaxime Bizon 25716f00a022SMaxime Bizon for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 25726f00a022SMaxime Bizon const struct bcm_enet_stats *s; 25736f00a022SMaxime Bizon char *p; 25746f00a022SMaxime Bizon 25756f00a022SMaxime Bizon s = &bcm_enetsw_gstrings_stats[i]; 25766f00a022SMaxime Bizon 25776f00a022SMaxime Bizon if (s->mib_reg == -1) 25786f00a022SMaxime Bizon p = (char *)&netdev->stats + s->stat_offset; 25796f00a022SMaxime Bizon else 25806f00a022SMaxime Bizon p = (char *)priv + s->stat_offset; 25816f00a022SMaxime Bizon 25826f00a022SMaxime Bizon data[i] = (s->sizeof_stat == sizeof(u64)) ? 25836f00a022SMaxime Bizon *(u64 *)p : *(u32 *)p; 25846f00a022SMaxime Bizon } 25856f00a022SMaxime Bizon } 25866f00a022SMaxime Bizon 258774624944SHao Chen static void 258874624944SHao Chen bcm_enetsw_get_ringparam(struct net_device *dev, 258974624944SHao Chen struct ethtool_ringparam *ering, 259074624944SHao Chen struct kernel_ethtool_ringparam *kernel_ering, 259174624944SHao Chen struct netlink_ext_ack *extack) 25926f00a022SMaxime Bizon { 25936f00a022SMaxime Bizon struct bcm_enet_priv *priv; 25946f00a022SMaxime Bizon 25956f00a022SMaxime Bizon priv = netdev_priv(dev); 25966f00a022SMaxime Bizon 25976f00a022SMaxime Bizon /* rx/tx ring is actually only limited by memory */ 25986f00a022SMaxime Bizon ering->rx_max_pending = 8192; 25996f00a022SMaxime Bizon ering->tx_max_pending = 8192; 26006f00a022SMaxime Bizon ering->rx_mini_max_pending = 0; 26016f00a022SMaxime Bizon ering->rx_jumbo_max_pending = 0; 26026f00a022SMaxime Bizon ering->rx_pending = priv->rx_ring_size; 26036f00a022SMaxime Bizon ering->tx_pending = priv->tx_ring_size; 26046f00a022SMaxime Bizon } 26056f00a022SMaxime Bizon 260674624944SHao Chen static int 260774624944SHao Chen bcm_enetsw_set_ringparam(struct net_device *dev, 260874624944SHao Chen struct 
ethtool_ringparam *ering, 260974624944SHao Chen struct kernel_ethtool_ringparam *kernel_ering, 261074624944SHao Chen struct netlink_ext_ack *extack) 26116f00a022SMaxime Bizon { 26126f00a022SMaxime Bizon struct bcm_enet_priv *priv; 26136f00a022SMaxime Bizon int was_running; 26146f00a022SMaxime Bizon 26156f00a022SMaxime Bizon priv = netdev_priv(dev); 26166f00a022SMaxime Bizon 26176f00a022SMaxime Bizon was_running = 0; 26186f00a022SMaxime Bizon if (netif_running(dev)) { 26196f00a022SMaxime Bizon bcm_enetsw_stop(dev); 26206f00a022SMaxime Bizon was_running = 1; 26216f00a022SMaxime Bizon } 26226f00a022SMaxime Bizon 26236f00a022SMaxime Bizon priv->rx_ring_size = ering->rx_pending; 26246f00a022SMaxime Bizon priv->tx_ring_size = ering->tx_pending; 26256f00a022SMaxime Bizon 26266f00a022SMaxime Bizon if (was_running) { 26276f00a022SMaxime Bizon int err; 26286f00a022SMaxime Bizon 26296f00a022SMaxime Bizon err = bcm_enetsw_open(dev); 26306f00a022SMaxime Bizon if (err) 26316f00a022SMaxime Bizon dev_close(dev); 26326f00a022SMaxime Bizon } 26336f00a022SMaxime Bizon return 0; 26346f00a022SMaxime Bizon } 26356f00a022SMaxime Bizon 2636dc8007e8SBhumika Goyal static const struct ethtool_ops bcm_enetsw_ethtool_ops = { 26376f00a022SMaxime Bizon .get_strings = bcm_enetsw_get_strings, 26386f00a022SMaxime Bizon .get_sset_count = bcm_enetsw_get_sset_count, 26396f00a022SMaxime Bizon .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, 26406f00a022SMaxime Bizon .get_drvinfo = bcm_enetsw_get_drvinfo, 26416f00a022SMaxime Bizon .get_ringparam = bcm_enetsw_get_ringparam, 26426f00a022SMaxime Bizon .set_ringparam = bcm_enetsw_set_ringparam, 26436f00a022SMaxime Bizon }; 26446f00a022SMaxime Bizon 26456f00a022SMaxime Bizon /* allocate netdevice, request register memory and register device. 
*/ 26466f00a022SMaxime Bizon static int bcm_enetsw_probe(struct platform_device *pdev) 26476f00a022SMaxime Bizon { 26486f00a022SMaxime Bizon struct bcm_enet_priv *priv; 26496f00a022SMaxime Bizon struct net_device *dev; 26506f00a022SMaxime Bizon struct bcm63xx_enetsw_platform_data *pd; 26516f00a022SMaxime Bizon struct resource *res_mem; 26526f00a022SMaxime Bizon int ret, irq_rx, irq_tx; 26536f00a022SMaxime Bizon 26546f00a022SMaxime Bizon if (!bcm_enet_shared_base[0]) 2655527a4871SJonas Gorski return -EPROBE_DEFER; 26566f00a022SMaxime Bizon 26576f00a022SMaxime Bizon res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 26586f00a022SMaxime Bizon irq_rx = platform_get_irq(pdev, 0); 26596f00a022SMaxime Bizon irq_tx = platform_get_irq(pdev, 1); 26606f00a022SMaxime Bizon if (!res_mem || irq_rx < 0) 26616f00a022SMaxime Bizon return -ENODEV; 26626f00a022SMaxime Bizon 26636f00a022SMaxime Bizon dev = alloc_etherdev(sizeof(*priv)); 26646f00a022SMaxime Bizon if (!dev) 26656f00a022SMaxime Bizon return -ENOMEM; 26666f00a022SMaxime Bizon priv = netdev_priv(dev); 26676f00a022SMaxime Bizon 26686f00a022SMaxime Bizon /* initialize default and fetch platform data */ 26696f00a022SMaxime Bizon priv->enet_is_sw = true; 26706f00a022SMaxime Bizon priv->irq_rx = irq_rx; 26716f00a022SMaxime Bizon priv->irq_tx = irq_tx; 26726f00a022SMaxime Bizon priv->rx_ring_size = BCMENET_DEF_RX_DESC; 26736f00a022SMaxime Bizon priv->tx_ring_size = BCMENET_DEF_TX_DESC; 26746f00a022SMaxime Bizon priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; 2675d27de0efSSieng Piaw Liew priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN; 26766f00a022SMaxime Bizon 2677cf0e7794SJingoo Han pd = dev_get_platdata(&pdev->dev); 26786f00a022SMaxime Bizon if (pd) { 2679a96d317fSJakub Kicinski eth_hw_addr_set(dev, pd->mac_addr); 26806f00a022SMaxime Bizon memcpy(priv->used_ports, pd->used_ports, 26816f00a022SMaxime Bizon sizeof(pd->used_ports)); 26826f00a022SMaxime Bizon priv->num_ports = pd->num_ports; 26833dc6475cSFlorian Fainelli 
priv->dma_has_sram = pd->dma_has_sram; 26843dc6475cSFlorian Fainelli priv->dma_chan_en_mask = pd->dma_chan_en_mask; 26853dc6475cSFlorian Fainelli priv->dma_chan_int_mask = pd->dma_chan_int_mask; 26863dc6475cSFlorian Fainelli priv->dma_chan_width = pd->dma_chan_width; 26876f00a022SMaxime Bizon } 26886f00a022SMaxime Bizon 2689e1c6dccaSJarod Wilson ret = bcm_enet_change_mtu(dev, dev->mtu); 26906f00a022SMaxime Bizon if (ret) 26916f00a022SMaxime Bizon goto out; 26926f00a022SMaxime Bizon 26937e697ce9SJonas Gorski priv->base = devm_ioremap_resource(&pdev->dev, res_mem); 26947e697ce9SJonas Gorski if (IS_ERR(priv->base)) { 26957e697ce9SJonas Gorski ret = PTR_ERR(priv->base); 26966f00a022SMaxime Bizon goto out; 26976f00a022SMaxime Bizon } 26986f00a022SMaxime Bizon 26997e697ce9SJonas Gorski priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw"); 27006f00a022SMaxime Bizon if (IS_ERR(priv->mac_clk)) { 27016f00a022SMaxime Bizon ret = PTR_ERR(priv->mac_clk); 27027e697ce9SJonas Gorski goto out; 27036f00a022SMaxime Bizon } 27049c86b846SJonas Gorski ret = clk_prepare_enable(priv->mac_clk); 27059c86b846SJonas Gorski if (ret) 27067e697ce9SJonas Gorski goto out; 27076f00a022SMaxime Bizon 27086f00a022SMaxime Bizon priv->rx_chan = 0; 27096f00a022SMaxime Bizon priv->tx_chan = 1; 27106f00a022SMaxime Bizon spin_lock_init(&priv->rx_lock); 27116f00a022SMaxime Bizon 27126f00a022SMaxime Bizon /* init rx timeout (used for oom) */ 2713eb8c6b5bSKees Cook timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); 27146f00a022SMaxime Bizon 27156f00a022SMaxime Bizon /* register netdevice */ 27166f00a022SMaxime Bizon dev->netdev_ops = &bcm_enetsw_ops; 2717b707b89fSJakub Kicinski netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16); 27187ad24ea4SWilfried Klaebe dev->ethtool_ops = &bcm_enetsw_ethtool_ops; 27196f00a022SMaxime Bizon SET_NETDEV_DEV(dev, &pdev->dev); 27206f00a022SMaxime Bizon 27216f00a022SMaxime Bizon spin_lock_init(&priv->enetsw_mdio_lock); 27226f00a022SMaxime Bizon 27236f00a022SMaxime 
Bizon ret = register_netdev(dev); 27246f00a022SMaxime Bizon if (ret) 27259c86b846SJonas Gorski goto out_disable_clk; 27266f00a022SMaxime Bizon 27276f00a022SMaxime Bizon netif_carrier_off(dev); 27286f00a022SMaxime Bizon platform_set_drvdata(pdev, dev); 27296f00a022SMaxime Bizon priv->pdev = pdev; 27306f00a022SMaxime Bizon priv->net_dev = dev; 27316f00a022SMaxime Bizon 27326f00a022SMaxime Bizon return 0; 27336f00a022SMaxime Bizon 27349c86b846SJonas Gorski out_disable_clk: 27359c86b846SJonas Gorski clk_disable_unprepare(priv->mac_clk); 27366f00a022SMaxime Bizon out: 27376f00a022SMaxime Bizon free_netdev(dev); 27386f00a022SMaxime Bizon return ret; 27396f00a022SMaxime Bizon } 27406f00a022SMaxime Bizon 27416f00a022SMaxime Bizon 27426f00a022SMaxime Bizon /* exit func, stops hardware and unregisters netdevice */ 27436f00a022SMaxime Bizon static int bcm_enetsw_remove(struct platform_device *pdev) 27446f00a022SMaxime Bizon { 27456f00a022SMaxime Bizon struct bcm_enet_priv *priv; 27466f00a022SMaxime Bizon struct net_device *dev; 27476f00a022SMaxime Bizon 27486f00a022SMaxime Bizon /* stop netdevice */ 27496f00a022SMaxime Bizon dev = platform_get_drvdata(pdev); 27506f00a022SMaxime Bizon priv = netdev_priv(dev); 27516f00a022SMaxime Bizon unregister_netdev(dev); 27526f00a022SMaxime Bizon 27539c86b846SJonas Gorski clk_disable_unprepare(priv->mac_clk); 27549c86b846SJonas Gorski 27556f00a022SMaxime Bizon free_netdev(dev); 27566f00a022SMaxime Bizon return 0; 27576f00a022SMaxime Bizon } 27586f00a022SMaxime Bizon 27596f00a022SMaxime Bizon struct platform_driver bcm63xx_enetsw_driver = { 27606f00a022SMaxime Bizon .probe = bcm_enetsw_probe, 27616f00a022SMaxime Bizon .remove = bcm_enetsw_remove, 27626f00a022SMaxime Bizon .driver = { 27636f00a022SMaxime Bizon .name = "bcm63xx_enetsw", 27646f00a022SMaxime Bizon .owner = THIS_MODULE, 27656f00a022SMaxime Bizon }, 27666f00a022SMaxime Bizon }; 27676f00a022SMaxime Bizon 27686f00a022SMaxime Bizon /* reserve & remap memory space shared between all 
macs */ 2769047fc566SBill Pemberton static int bcm_enet_shared_probe(struct platform_device *pdev) 2770adfc5217SJeff Kirsher { 27710ae99b5fSMaxime Bizon void __iomem *p[3]; 27720ae99b5fSMaxime Bizon unsigned int i; 2773adfc5217SJeff Kirsher 27740ae99b5fSMaxime Bizon memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); 2775adfc5217SJeff Kirsher 27760ae99b5fSMaxime Bizon for (i = 0; i < 3; i++) { 27779d26cfa5SYueHaibing p[i] = devm_platform_ioremap_resource(pdev, i); 2778646093a2SWei Yongjun if (IS_ERR(p[i])) 2779646093a2SWei Yongjun return PTR_ERR(p[i]); 27800ae99b5fSMaxime Bizon } 27810ae99b5fSMaxime Bizon 27820ae99b5fSMaxime Bizon memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); 27831c03da05SJonas Gorski 2784adfc5217SJeff Kirsher return 0; 2785adfc5217SJeff Kirsher } 2786adfc5217SJeff Kirsher 2787047fc566SBill Pemberton static int bcm_enet_shared_remove(struct platform_device *pdev) 2788adfc5217SJeff Kirsher { 2789adfc5217SJeff Kirsher return 0; 2790adfc5217SJeff Kirsher } 2791adfc5217SJeff Kirsher 27926f00a022SMaxime Bizon /* this "shared" driver is needed because both macs share a single 2793adfc5217SJeff Kirsher * address space 2794adfc5217SJeff Kirsher */ 2795adfc5217SJeff Kirsher struct platform_driver bcm63xx_enet_shared_driver = { 2796adfc5217SJeff Kirsher .probe = bcm_enet_shared_probe, 2797047fc566SBill Pemberton .remove = bcm_enet_shared_remove, 2798adfc5217SJeff Kirsher .driver = { 2799adfc5217SJeff Kirsher .name = "bcm63xx_enet_shared", 2800adfc5217SJeff Kirsher .owner = THIS_MODULE, 2801adfc5217SJeff Kirsher }, 2802adfc5217SJeff Kirsher }; 2803adfc5217SJeff Kirsher 28040d1c744cSThierry Reding static struct platform_driver * const drivers[] = { 28050d1c744cSThierry Reding &bcm63xx_enet_shared_driver, 28060d1c744cSThierry Reding &bcm63xx_enet_driver, 28070d1c744cSThierry Reding &bcm63xx_enetsw_driver, 28080d1c744cSThierry Reding }; 28090d1c744cSThierry Reding 28106f00a022SMaxime Bizon /* entry point */ 2811adfc5217SJeff Kirsher 
static int __init bcm_enet_init(void) 2812adfc5217SJeff Kirsher { 28130d1c744cSThierry Reding return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); 2814adfc5217SJeff Kirsher } 2815adfc5217SJeff Kirsher 2816adfc5217SJeff Kirsher static void __exit bcm_enet_exit(void) 2817adfc5217SJeff Kirsher { 28180d1c744cSThierry Reding platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); 2819adfc5217SJeff Kirsher } 2820adfc5217SJeff Kirsher 2821adfc5217SJeff Kirsher 2822adfc5217SJeff Kirsher module_init(bcm_enet_init); 2823adfc5217SJeff Kirsher module_exit(bcm_enet_exit); 2824adfc5217SJeff Kirsher 2825adfc5217SJeff Kirsher MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); 2826adfc5217SJeff Kirsher MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); 2827adfc5217SJeff Kirsher MODULE_LICENSE("GPL"); 2828