1adfc5217SJeff Kirsher /* 2adfc5217SJeff Kirsher * Driver for BCM963xx builtin Ethernet mac 3adfc5217SJeff Kirsher * 4adfc5217SJeff Kirsher * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> 5adfc5217SJeff Kirsher * 6adfc5217SJeff Kirsher * This program is free software; you can redistribute it and/or modify 7adfc5217SJeff Kirsher * it under the terms of the GNU General Public License as published by 8adfc5217SJeff Kirsher * the Free Software Foundation; either version 2 of the License, or 9adfc5217SJeff Kirsher * (at your option) any later version. 10adfc5217SJeff Kirsher * 11adfc5217SJeff Kirsher * This program is distributed in the hope that it will be useful, 12adfc5217SJeff Kirsher * but WITHOUT ANY WARRANTY; without even the implied warranty of 13adfc5217SJeff Kirsher * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14adfc5217SJeff Kirsher * GNU General Public License for more details. 15adfc5217SJeff Kirsher * 16adfc5217SJeff Kirsher * You should have received a copy of the GNU General Public License 17adfc5217SJeff Kirsher * along with this program; if not, write to the Free Software 18adfc5217SJeff Kirsher * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
19adfc5217SJeff Kirsher */ 20adfc5217SJeff Kirsher #include <linux/init.h> 21adfc5217SJeff Kirsher #include <linux/interrupt.h> 22adfc5217SJeff Kirsher #include <linux/module.h> 23adfc5217SJeff Kirsher #include <linux/clk.h> 24adfc5217SJeff Kirsher #include <linux/etherdevice.h> 25adfc5217SJeff Kirsher #include <linux/slab.h> 26adfc5217SJeff Kirsher #include <linux/delay.h> 27adfc5217SJeff Kirsher #include <linux/ethtool.h> 28adfc5217SJeff Kirsher #include <linux/crc32.h> 29adfc5217SJeff Kirsher #include <linux/err.h> 30adfc5217SJeff Kirsher #include <linux/dma-mapping.h> 31adfc5217SJeff Kirsher #include <linux/platform_device.h> 32adfc5217SJeff Kirsher #include <linux/if_vlan.h> 33adfc5217SJeff Kirsher 34adfc5217SJeff Kirsher #include <bcm63xx_dev_enet.h> 35adfc5217SJeff Kirsher #include "bcm63xx_enet.h" 36adfc5217SJeff Kirsher 37adfc5217SJeff Kirsher static char bcm_enet_driver_name[] = "bcm63xx_enet"; 38adfc5217SJeff Kirsher static char bcm_enet_driver_version[] = "1.0"; 39adfc5217SJeff Kirsher 40adfc5217SJeff Kirsher static int copybreak __read_mostly = 128; 41adfc5217SJeff Kirsher module_param(copybreak, int, 0); 42adfc5217SJeff Kirsher MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 43adfc5217SJeff Kirsher 44adfc5217SJeff Kirsher /* io memory shared between all devices */ 45adfc5217SJeff Kirsher static void __iomem *bcm_enet_shared_base; 46adfc5217SJeff Kirsher 47adfc5217SJeff Kirsher /* 48adfc5217SJeff Kirsher * io helpers to access mac registers 49adfc5217SJeff Kirsher */ 50adfc5217SJeff Kirsher static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) 51adfc5217SJeff Kirsher { 52adfc5217SJeff Kirsher return bcm_readl(priv->base + off); 53adfc5217SJeff Kirsher } 54adfc5217SJeff Kirsher 55adfc5217SJeff Kirsher static inline void enet_writel(struct bcm_enet_priv *priv, 56adfc5217SJeff Kirsher u32 val, u32 off) 57adfc5217SJeff Kirsher { 58adfc5217SJeff Kirsher bcm_writel(val, priv->base + off); 59adfc5217SJeff Kirsher } 60adfc5217SJeff Kirsher 
61adfc5217SJeff Kirsher /* 62adfc5217SJeff Kirsher * io helpers to access shared registers 63adfc5217SJeff Kirsher */ 64adfc5217SJeff Kirsher static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) 65adfc5217SJeff Kirsher { 66adfc5217SJeff Kirsher return bcm_readl(bcm_enet_shared_base + off); 67adfc5217SJeff Kirsher } 68adfc5217SJeff Kirsher 69adfc5217SJeff Kirsher static inline void enet_dma_writel(struct bcm_enet_priv *priv, 70adfc5217SJeff Kirsher u32 val, u32 off) 71adfc5217SJeff Kirsher { 72adfc5217SJeff Kirsher bcm_writel(val, bcm_enet_shared_base + off); 73adfc5217SJeff Kirsher } 74adfc5217SJeff Kirsher 75adfc5217SJeff Kirsher /* 76adfc5217SJeff Kirsher * write given data into mii register and wait for transfer to end 77adfc5217SJeff Kirsher * with timeout (average measured transfer time is 25us) 78adfc5217SJeff Kirsher */ 79adfc5217SJeff Kirsher static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) 80adfc5217SJeff Kirsher { 81adfc5217SJeff Kirsher int limit; 82adfc5217SJeff Kirsher 83adfc5217SJeff Kirsher /* make sure mii interrupt status is cleared */ 84adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MII, ENET_IR_REG); 85adfc5217SJeff Kirsher 86adfc5217SJeff Kirsher enet_writel(priv, data, ENET_MIIDATA_REG); 87adfc5217SJeff Kirsher wmb(); 88adfc5217SJeff Kirsher 89adfc5217SJeff Kirsher /* busy wait on mii interrupt bit, with timeout */ 90adfc5217SJeff Kirsher limit = 1000; 91adfc5217SJeff Kirsher do { 92adfc5217SJeff Kirsher if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) 93adfc5217SJeff Kirsher break; 94adfc5217SJeff Kirsher udelay(1); 95adfc5217SJeff Kirsher } while (limit-- > 0); 96adfc5217SJeff Kirsher 97adfc5217SJeff Kirsher return (limit < 0) ? 
1 : 0; 98adfc5217SJeff Kirsher } 99adfc5217SJeff Kirsher 100adfc5217SJeff Kirsher /* 101adfc5217SJeff Kirsher * MII internal read callback 102adfc5217SJeff Kirsher */ 103adfc5217SJeff Kirsher static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, 104adfc5217SJeff Kirsher int regnum) 105adfc5217SJeff Kirsher { 106adfc5217SJeff Kirsher u32 tmp, val; 107adfc5217SJeff Kirsher 108adfc5217SJeff Kirsher tmp = regnum << ENET_MIIDATA_REG_SHIFT; 109adfc5217SJeff Kirsher tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; 110adfc5217SJeff Kirsher tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; 111adfc5217SJeff Kirsher tmp |= ENET_MIIDATA_OP_READ_MASK; 112adfc5217SJeff Kirsher 113adfc5217SJeff Kirsher if (do_mdio_op(priv, tmp)) 114adfc5217SJeff Kirsher return -1; 115adfc5217SJeff Kirsher 116adfc5217SJeff Kirsher val = enet_readl(priv, ENET_MIIDATA_REG); 117adfc5217SJeff Kirsher val &= 0xffff; 118adfc5217SJeff Kirsher return val; 119adfc5217SJeff Kirsher } 120adfc5217SJeff Kirsher 121adfc5217SJeff Kirsher /* 122adfc5217SJeff Kirsher * MII internal write callback 123adfc5217SJeff Kirsher */ 124adfc5217SJeff Kirsher static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, 125adfc5217SJeff Kirsher int regnum, u16 value) 126adfc5217SJeff Kirsher { 127adfc5217SJeff Kirsher u32 tmp; 128adfc5217SJeff Kirsher 129adfc5217SJeff Kirsher tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; 130adfc5217SJeff Kirsher tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; 131adfc5217SJeff Kirsher tmp |= regnum << ENET_MIIDATA_REG_SHIFT; 132adfc5217SJeff Kirsher tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; 133adfc5217SJeff Kirsher tmp |= ENET_MIIDATA_OP_WRITE_MASK; 134adfc5217SJeff Kirsher 135adfc5217SJeff Kirsher (void)do_mdio_op(priv, tmp); 136adfc5217SJeff Kirsher return 0; 137adfc5217SJeff Kirsher } 138adfc5217SJeff Kirsher 139adfc5217SJeff Kirsher /* 140adfc5217SJeff Kirsher * MII read callback from phylib 141adfc5217SJeff Kirsher */ 142adfc5217SJeff Kirsher static int bcm_enet_mdio_read_phylib(struct 
mii_bus *bus, int mii_id, 143adfc5217SJeff Kirsher int regnum) 144adfc5217SJeff Kirsher { 145adfc5217SJeff Kirsher return bcm_enet_mdio_read(bus->priv, mii_id, regnum); 146adfc5217SJeff Kirsher } 147adfc5217SJeff Kirsher 148adfc5217SJeff Kirsher /* 149adfc5217SJeff Kirsher * MII write callback from phylib 150adfc5217SJeff Kirsher */ 151adfc5217SJeff Kirsher static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, 152adfc5217SJeff Kirsher int regnum, u16 value) 153adfc5217SJeff Kirsher { 154adfc5217SJeff Kirsher return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); 155adfc5217SJeff Kirsher } 156adfc5217SJeff Kirsher 157adfc5217SJeff Kirsher /* 158adfc5217SJeff Kirsher * MII read callback from mii core 159adfc5217SJeff Kirsher */ 160adfc5217SJeff Kirsher static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, 161adfc5217SJeff Kirsher int regnum) 162adfc5217SJeff Kirsher { 163adfc5217SJeff Kirsher return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); 164adfc5217SJeff Kirsher } 165adfc5217SJeff Kirsher 166adfc5217SJeff Kirsher /* 167adfc5217SJeff Kirsher * MII write callback from mii core 168adfc5217SJeff Kirsher */ 169adfc5217SJeff Kirsher static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, 170adfc5217SJeff Kirsher int regnum, int value) 171adfc5217SJeff Kirsher { 172adfc5217SJeff Kirsher bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); 173adfc5217SJeff Kirsher } 174adfc5217SJeff Kirsher 175adfc5217SJeff Kirsher /* 176adfc5217SJeff Kirsher * refill rx queue 177adfc5217SJeff Kirsher */ 178adfc5217SJeff Kirsher static int bcm_enet_refill_rx(struct net_device *dev) 179adfc5217SJeff Kirsher { 180adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 181adfc5217SJeff Kirsher 182adfc5217SJeff Kirsher priv = netdev_priv(dev); 183adfc5217SJeff Kirsher 184adfc5217SJeff Kirsher while (priv->rx_desc_count < priv->rx_ring_size) { 185adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 186adfc5217SJeff Kirsher 
struct sk_buff *skb; 187adfc5217SJeff Kirsher dma_addr_t p; 188adfc5217SJeff Kirsher int desc_idx; 189adfc5217SJeff Kirsher u32 len_stat; 190adfc5217SJeff Kirsher 191adfc5217SJeff Kirsher desc_idx = priv->rx_dirty_desc; 192adfc5217SJeff Kirsher desc = &priv->rx_desc_cpu[desc_idx]; 193adfc5217SJeff Kirsher 194adfc5217SJeff Kirsher if (!priv->rx_skb[desc_idx]) { 195adfc5217SJeff Kirsher skb = netdev_alloc_skb(dev, priv->rx_skb_size); 196adfc5217SJeff Kirsher if (!skb) 197adfc5217SJeff Kirsher break; 198adfc5217SJeff Kirsher priv->rx_skb[desc_idx] = skb; 199adfc5217SJeff Kirsher 200adfc5217SJeff Kirsher p = dma_map_single(&priv->pdev->dev, skb->data, 201adfc5217SJeff Kirsher priv->rx_skb_size, 202adfc5217SJeff Kirsher DMA_FROM_DEVICE); 203adfc5217SJeff Kirsher desc->address = p; 204adfc5217SJeff Kirsher } 205adfc5217SJeff Kirsher 206adfc5217SJeff Kirsher len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; 207adfc5217SJeff Kirsher len_stat |= DMADESC_OWNER_MASK; 208adfc5217SJeff Kirsher if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { 209adfc5217SJeff Kirsher len_stat |= DMADESC_WRAP_MASK; 210adfc5217SJeff Kirsher priv->rx_dirty_desc = 0; 211adfc5217SJeff Kirsher } else { 212adfc5217SJeff Kirsher priv->rx_dirty_desc++; 213adfc5217SJeff Kirsher } 214adfc5217SJeff Kirsher wmb(); 215adfc5217SJeff Kirsher desc->len_stat = len_stat; 216adfc5217SJeff Kirsher 217adfc5217SJeff Kirsher priv->rx_desc_count++; 218adfc5217SJeff Kirsher 219adfc5217SJeff Kirsher /* tell dma engine we allocated one buffer */ 220adfc5217SJeff Kirsher enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); 221adfc5217SJeff Kirsher } 222adfc5217SJeff Kirsher 223adfc5217SJeff Kirsher /* If rx ring is still empty, set a timer to try allocating 224adfc5217SJeff Kirsher * again at a later time. 
*/ 225adfc5217SJeff Kirsher if (priv->rx_desc_count == 0 && netif_running(dev)) { 226adfc5217SJeff Kirsher dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); 227adfc5217SJeff Kirsher priv->rx_timeout.expires = jiffies + HZ; 228adfc5217SJeff Kirsher add_timer(&priv->rx_timeout); 229adfc5217SJeff Kirsher } 230adfc5217SJeff Kirsher 231adfc5217SJeff Kirsher return 0; 232adfc5217SJeff Kirsher } 233adfc5217SJeff Kirsher 234adfc5217SJeff Kirsher /* 235adfc5217SJeff Kirsher * timer callback to defer refill rx queue in case we're OOM 236adfc5217SJeff Kirsher */ 237adfc5217SJeff Kirsher static void bcm_enet_refill_rx_timer(unsigned long data) 238adfc5217SJeff Kirsher { 239adfc5217SJeff Kirsher struct net_device *dev; 240adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 241adfc5217SJeff Kirsher 242adfc5217SJeff Kirsher dev = (struct net_device *)data; 243adfc5217SJeff Kirsher priv = netdev_priv(dev); 244adfc5217SJeff Kirsher 245adfc5217SJeff Kirsher spin_lock(&priv->rx_lock); 246adfc5217SJeff Kirsher bcm_enet_refill_rx((struct net_device *)data); 247adfc5217SJeff Kirsher spin_unlock(&priv->rx_lock); 248adfc5217SJeff Kirsher } 249adfc5217SJeff Kirsher 250adfc5217SJeff Kirsher /* 251adfc5217SJeff Kirsher * extract packet from rx queue 252adfc5217SJeff Kirsher */ 253adfc5217SJeff Kirsher static int bcm_enet_receive_queue(struct net_device *dev, int budget) 254adfc5217SJeff Kirsher { 255adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 256adfc5217SJeff Kirsher struct device *kdev; 257adfc5217SJeff Kirsher int processed; 258adfc5217SJeff Kirsher 259adfc5217SJeff Kirsher priv = netdev_priv(dev); 260adfc5217SJeff Kirsher kdev = &priv->pdev->dev; 261adfc5217SJeff Kirsher processed = 0; 262adfc5217SJeff Kirsher 263adfc5217SJeff Kirsher /* don't scan ring further than number of refilled 264adfc5217SJeff Kirsher * descriptor */ 265adfc5217SJeff Kirsher if (budget > priv->rx_desc_count) 266adfc5217SJeff Kirsher budget = priv->rx_desc_count; 267adfc5217SJeff Kirsher 
268adfc5217SJeff Kirsher do { 269adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 270adfc5217SJeff Kirsher struct sk_buff *skb; 271adfc5217SJeff Kirsher int desc_idx; 272adfc5217SJeff Kirsher u32 len_stat; 273adfc5217SJeff Kirsher unsigned int len; 274adfc5217SJeff Kirsher 275adfc5217SJeff Kirsher desc_idx = priv->rx_curr_desc; 276adfc5217SJeff Kirsher desc = &priv->rx_desc_cpu[desc_idx]; 277adfc5217SJeff Kirsher 278adfc5217SJeff Kirsher /* make sure we actually read the descriptor status at 279adfc5217SJeff Kirsher * each loop */ 280adfc5217SJeff Kirsher rmb(); 281adfc5217SJeff Kirsher 282adfc5217SJeff Kirsher len_stat = desc->len_stat; 283adfc5217SJeff Kirsher 284adfc5217SJeff Kirsher /* break if dma ownership belongs to hw */ 285adfc5217SJeff Kirsher if (len_stat & DMADESC_OWNER_MASK) 286adfc5217SJeff Kirsher break; 287adfc5217SJeff Kirsher 288adfc5217SJeff Kirsher processed++; 289adfc5217SJeff Kirsher priv->rx_curr_desc++; 290adfc5217SJeff Kirsher if (priv->rx_curr_desc == priv->rx_ring_size) 291adfc5217SJeff Kirsher priv->rx_curr_desc = 0; 292adfc5217SJeff Kirsher priv->rx_desc_count--; 293adfc5217SJeff Kirsher 294adfc5217SJeff Kirsher /* if the packet does not have start of packet _and_ 295adfc5217SJeff Kirsher * end of packet flag set, then just recycle it */ 296adfc5217SJeff Kirsher if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { 297adfc5217SJeff Kirsher dev->stats.rx_dropped++; 298adfc5217SJeff Kirsher continue; 299adfc5217SJeff Kirsher } 300adfc5217SJeff Kirsher 301adfc5217SJeff Kirsher /* recycle packet if it's marked as bad */ 302adfc5217SJeff Kirsher if (unlikely(len_stat & DMADESC_ERR_MASK)) { 303adfc5217SJeff Kirsher dev->stats.rx_errors++; 304adfc5217SJeff Kirsher 305adfc5217SJeff Kirsher if (len_stat & DMADESC_OVSIZE_MASK) 306adfc5217SJeff Kirsher dev->stats.rx_length_errors++; 307adfc5217SJeff Kirsher if (len_stat & DMADESC_CRC_MASK) 308adfc5217SJeff Kirsher dev->stats.rx_crc_errors++; 309adfc5217SJeff Kirsher if (len_stat & 
DMADESC_UNDER_MASK) 310adfc5217SJeff Kirsher dev->stats.rx_frame_errors++; 311adfc5217SJeff Kirsher if (len_stat & DMADESC_OV_MASK) 312adfc5217SJeff Kirsher dev->stats.rx_fifo_errors++; 313adfc5217SJeff Kirsher continue; 314adfc5217SJeff Kirsher } 315adfc5217SJeff Kirsher 316adfc5217SJeff Kirsher /* valid packet */ 317adfc5217SJeff Kirsher skb = priv->rx_skb[desc_idx]; 318adfc5217SJeff Kirsher len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; 319adfc5217SJeff Kirsher /* don't include FCS */ 320adfc5217SJeff Kirsher len -= 4; 321adfc5217SJeff Kirsher 322adfc5217SJeff Kirsher if (len < copybreak) { 323adfc5217SJeff Kirsher struct sk_buff *nskb; 324adfc5217SJeff Kirsher 325adfc5217SJeff Kirsher nskb = netdev_alloc_skb_ip_align(dev, len); 326adfc5217SJeff Kirsher if (!nskb) { 327adfc5217SJeff Kirsher /* forget packet, just rearm desc */ 328adfc5217SJeff Kirsher dev->stats.rx_dropped++; 329adfc5217SJeff Kirsher continue; 330adfc5217SJeff Kirsher } 331adfc5217SJeff Kirsher 332adfc5217SJeff Kirsher dma_sync_single_for_cpu(kdev, desc->address, 333adfc5217SJeff Kirsher len, DMA_FROM_DEVICE); 334adfc5217SJeff Kirsher memcpy(nskb->data, skb->data, len); 335adfc5217SJeff Kirsher dma_sync_single_for_device(kdev, desc->address, 336adfc5217SJeff Kirsher len, DMA_FROM_DEVICE); 337adfc5217SJeff Kirsher skb = nskb; 338adfc5217SJeff Kirsher } else { 339adfc5217SJeff Kirsher dma_unmap_single(&priv->pdev->dev, desc->address, 340adfc5217SJeff Kirsher priv->rx_skb_size, DMA_FROM_DEVICE); 341adfc5217SJeff Kirsher priv->rx_skb[desc_idx] = NULL; 342adfc5217SJeff Kirsher } 343adfc5217SJeff Kirsher 344adfc5217SJeff Kirsher skb_put(skb, len); 345adfc5217SJeff Kirsher skb->protocol = eth_type_trans(skb, dev); 346adfc5217SJeff Kirsher dev->stats.rx_packets++; 347adfc5217SJeff Kirsher dev->stats.rx_bytes += len; 348adfc5217SJeff Kirsher netif_receive_skb(skb); 349adfc5217SJeff Kirsher 350adfc5217SJeff Kirsher } while (--budget > 0); 351adfc5217SJeff Kirsher 352adfc5217SJeff Kirsher 
if (processed || !priv->rx_desc_count) { 353adfc5217SJeff Kirsher bcm_enet_refill_rx(dev); 354adfc5217SJeff Kirsher 355adfc5217SJeff Kirsher /* kick rx dma */ 356adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, 357adfc5217SJeff Kirsher ENETDMA_CHANCFG_REG(priv->rx_chan)); 358adfc5217SJeff Kirsher } 359adfc5217SJeff Kirsher 360adfc5217SJeff Kirsher return processed; 361adfc5217SJeff Kirsher } 362adfc5217SJeff Kirsher 363adfc5217SJeff Kirsher 364adfc5217SJeff Kirsher /* 365adfc5217SJeff Kirsher * try to or force reclaim of transmitted buffers 366adfc5217SJeff Kirsher */ 367adfc5217SJeff Kirsher static int bcm_enet_tx_reclaim(struct net_device *dev, int force) 368adfc5217SJeff Kirsher { 369adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 370adfc5217SJeff Kirsher int released; 371adfc5217SJeff Kirsher 372adfc5217SJeff Kirsher priv = netdev_priv(dev); 373adfc5217SJeff Kirsher released = 0; 374adfc5217SJeff Kirsher 375adfc5217SJeff Kirsher while (priv->tx_desc_count < priv->tx_ring_size) { 376adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 377adfc5217SJeff Kirsher struct sk_buff *skb; 378adfc5217SJeff Kirsher 379adfc5217SJeff Kirsher /* We run in a bh and fight against start_xmit, which 380adfc5217SJeff Kirsher * is called with bh disabled */ 381adfc5217SJeff Kirsher spin_lock(&priv->tx_lock); 382adfc5217SJeff Kirsher 383adfc5217SJeff Kirsher desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; 384adfc5217SJeff Kirsher 385adfc5217SJeff Kirsher if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { 386adfc5217SJeff Kirsher spin_unlock(&priv->tx_lock); 387adfc5217SJeff Kirsher break; 388adfc5217SJeff Kirsher } 389adfc5217SJeff Kirsher 390adfc5217SJeff Kirsher /* ensure other field of the descriptor were not read 391adfc5217SJeff Kirsher * before we checked ownership */ 392adfc5217SJeff Kirsher rmb(); 393adfc5217SJeff Kirsher 394adfc5217SJeff Kirsher skb = priv->tx_skb[priv->tx_dirty_desc]; 395adfc5217SJeff Kirsher priv->tx_skb[priv->tx_dirty_desc] = NULL; 
396adfc5217SJeff Kirsher dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, 397adfc5217SJeff Kirsher DMA_TO_DEVICE); 398adfc5217SJeff Kirsher 399adfc5217SJeff Kirsher priv->tx_dirty_desc++; 400adfc5217SJeff Kirsher if (priv->tx_dirty_desc == priv->tx_ring_size) 401adfc5217SJeff Kirsher priv->tx_dirty_desc = 0; 402adfc5217SJeff Kirsher priv->tx_desc_count++; 403adfc5217SJeff Kirsher 404adfc5217SJeff Kirsher spin_unlock(&priv->tx_lock); 405adfc5217SJeff Kirsher 406adfc5217SJeff Kirsher if (desc->len_stat & DMADESC_UNDER_MASK) 407adfc5217SJeff Kirsher dev->stats.tx_errors++; 408adfc5217SJeff Kirsher 409adfc5217SJeff Kirsher dev_kfree_skb(skb); 410adfc5217SJeff Kirsher released++; 411adfc5217SJeff Kirsher } 412adfc5217SJeff Kirsher 413adfc5217SJeff Kirsher if (netif_queue_stopped(dev) && released) 414adfc5217SJeff Kirsher netif_wake_queue(dev); 415adfc5217SJeff Kirsher 416adfc5217SJeff Kirsher return released; 417adfc5217SJeff Kirsher } 418adfc5217SJeff Kirsher 419adfc5217SJeff Kirsher /* 420adfc5217SJeff Kirsher * poll func, called by network core 421adfc5217SJeff Kirsher */ 422adfc5217SJeff Kirsher static int bcm_enet_poll(struct napi_struct *napi, int budget) 423adfc5217SJeff Kirsher { 424adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 425adfc5217SJeff Kirsher struct net_device *dev; 426adfc5217SJeff Kirsher int tx_work_done, rx_work_done; 427adfc5217SJeff Kirsher 428adfc5217SJeff Kirsher priv = container_of(napi, struct bcm_enet_priv, napi); 429adfc5217SJeff Kirsher dev = priv->net_dev; 430adfc5217SJeff Kirsher 431adfc5217SJeff Kirsher /* ack interrupts */ 432adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 433adfc5217SJeff Kirsher ENETDMA_IR_REG(priv->rx_chan)); 434adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 435adfc5217SJeff Kirsher ENETDMA_IR_REG(priv->tx_chan)); 436adfc5217SJeff Kirsher 437adfc5217SJeff Kirsher /* reclaim sent skb */ 438adfc5217SJeff Kirsher tx_work_done = bcm_enet_tx_reclaim(dev, 0); 
439adfc5217SJeff Kirsher 440adfc5217SJeff Kirsher spin_lock(&priv->rx_lock); 441adfc5217SJeff Kirsher rx_work_done = bcm_enet_receive_queue(dev, budget); 442adfc5217SJeff Kirsher spin_unlock(&priv->rx_lock); 443adfc5217SJeff Kirsher 444adfc5217SJeff Kirsher if (rx_work_done >= budget || tx_work_done > 0) { 445adfc5217SJeff Kirsher /* rx/tx queue is not yet empty/clean */ 446adfc5217SJeff Kirsher return rx_work_done; 447adfc5217SJeff Kirsher } 448adfc5217SJeff Kirsher 449adfc5217SJeff Kirsher /* no more packet in rx/tx queue, remove device from poll 450adfc5217SJeff Kirsher * queue */ 451adfc5217SJeff Kirsher napi_complete(napi); 452adfc5217SJeff Kirsher 453adfc5217SJeff Kirsher /* restore rx/tx interrupt */ 454adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 455adfc5217SJeff Kirsher ENETDMA_IRMASK_REG(priv->rx_chan)); 456adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 457adfc5217SJeff Kirsher ENETDMA_IRMASK_REG(priv->tx_chan)); 458adfc5217SJeff Kirsher 459adfc5217SJeff Kirsher return rx_work_done; 460adfc5217SJeff Kirsher } 461adfc5217SJeff Kirsher 462adfc5217SJeff Kirsher /* 463adfc5217SJeff Kirsher * mac interrupt handler 464adfc5217SJeff Kirsher */ 465adfc5217SJeff Kirsher static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) 466adfc5217SJeff Kirsher { 467adfc5217SJeff Kirsher struct net_device *dev; 468adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 469adfc5217SJeff Kirsher u32 stat; 470adfc5217SJeff Kirsher 471adfc5217SJeff Kirsher dev = dev_id; 472adfc5217SJeff Kirsher priv = netdev_priv(dev); 473adfc5217SJeff Kirsher 474adfc5217SJeff Kirsher stat = enet_readl(priv, ENET_IR_REG); 475adfc5217SJeff Kirsher if (!(stat & ENET_IR_MIB)) 476adfc5217SJeff Kirsher return IRQ_NONE; 477adfc5217SJeff Kirsher 478adfc5217SJeff Kirsher /* clear & mask interrupt */ 479adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 480adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_IRMASK_REG); 481adfc5217SJeff Kirsher 
482adfc5217SJeff Kirsher /* read mib registers in workqueue */ 483adfc5217SJeff Kirsher schedule_work(&priv->mib_update_task); 484adfc5217SJeff Kirsher 485adfc5217SJeff Kirsher return IRQ_HANDLED; 486adfc5217SJeff Kirsher } 487adfc5217SJeff Kirsher 488adfc5217SJeff Kirsher /* 489adfc5217SJeff Kirsher * rx/tx dma interrupt handler 490adfc5217SJeff Kirsher */ 491adfc5217SJeff Kirsher static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) 492adfc5217SJeff Kirsher { 493adfc5217SJeff Kirsher struct net_device *dev; 494adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 495adfc5217SJeff Kirsher 496adfc5217SJeff Kirsher dev = dev_id; 497adfc5217SJeff Kirsher priv = netdev_priv(dev); 498adfc5217SJeff Kirsher 499adfc5217SJeff Kirsher /* mask rx/tx interrupts */ 500adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); 501adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); 502adfc5217SJeff Kirsher 503adfc5217SJeff Kirsher napi_schedule(&priv->napi); 504adfc5217SJeff Kirsher 505adfc5217SJeff Kirsher return IRQ_HANDLED; 506adfc5217SJeff Kirsher } 507adfc5217SJeff Kirsher 508adfc5217SJeff Kirsher /* 509adfc5217SJeff Kirsher * tx request callback 510adfc5217SJeff Kirsher */ 511adfc5217SJeff Kirsher static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 512adfc5217SJeff Kirsher { 513adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 514adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 515adfc5217SJeff Kirsher u32 len_stat; 516adfc5217SJeff Kirsher int ret; 517adfc5217SJeff Kirsher 518adfc5217SJeff Kirsher priv = netdev_priv(dev); 519adfc5217SJeff Kirsher 520adfc5217SJeff Kirsher /* lock against tx reclaim */ 521adfc5217SJeff Kirsher spin_lock(&priv->tx_lock); 522adfc5217SJeff Kirsher 523adfc5217SJeff Kirsher /* make sure the tx hw queue is not full, should not happen 524adfc5217SJeff Kirsher * since we stop queue before it's the case */ 525adfc5217SJeff Kirsher if (unlikely(!priv->tx_desc_count)) { 
526adfc5217SJeff Kirsher netif_stop_queue(dev); 527adfc5217SJeff Kirsher dev_err(&priv->pdev->dev, "xmit called with no tx desc " 528adfc5217SJeff Kirsher "available?\n"); 529adfc5217SJeff Kirsher ret = NETDEV_TX_BUSY; 530adfc5217SJeff Kirsher goto out_unlock; 531adfc5217SJeff Kirsher } 532adfc5217SJeff Kirsher 533adfc5217SJeff Kirsher /* point to the next available desc */ 534adfc5217SJeff Kirsher desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; 535adfc5217SJeff Kirsher priv->tx_skb[priv->tx_curr_desc] = skb; 536adfc5217SJeff Kirsher 537adfc5217SJeff Kirsher /* fill descriptor */ 538adfc5217SJeff Kirsher desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, 539adfc5217SJeff Kirsher DMA_TO_DEVICE); 540adfc5217SJeff Kirsher 541adfc5217SJeff Kirsher len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; 542adfc5217SJeff Kirsher len_stat |= DMADESC_ESOP_MASK | 543adfc5217SJeff Kirsher DMADESC_APPEND_CRC | 544adfc5217SJeff Kirsher DMADESC_OWNER_MASK; 545adfc5217SJeff Kirsher 546adfc5217SJeff Kirsher priv->tx_curr_desc++; 547adfc5217SJeff Kirsher if (priv->tx_curr_desc == priv->tx_ring_size) { 548adfc5217SJeff Kirsher priv->tx_curr_desc = 0; 549adfc5217SJeff Kirsher len_stat |= DMADESC_WRAP_MASK; 550adfc5217SJeff Kirsher } 551adfc5217SJeff Kirsher priv->tx_desc_count--; 552adfc5217SJeff Kirsher 553adfc5217SJeff Kirsher /* dma might be already polling, make sure we update desc 554adfc5217SJeff Kirsher * fields in correct order */ 555adfc5217SJeff Kirsher wmb(); 556adfc5217SJeff Kirsher desc->len_stat = len_stat; 557adfc5217SJeff Kirsher wmb(); 558adfc5217SJeff Kirsher 559adfc5217SJeff Kirsher /* kick tx dma */ 560adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, 561adfc5217SJeff Kirsher ENETDMA_CHANCFG_REG(priv->tx_chan)); 562adfc5217SJeff Kirsher 563adfc5217SJeff Kirsher /* stop queue if no more desc available */ 564adfc5217SJeff Kirsher if (!priv->tx_desc_count) 565adfc5217SJeff Kirsher netif_stop_queue(dev); 
566adfc5217SJeff Kirsher 567adfc5217SJeff Kirsher dev->stats.tx_bytes += skb->len; 568adfc5217SJeff Kirsher dev->stats.tx_packets++; 569adfc5217SJeff Kirsher ret = NETDEV_TX_OK; 570adfc5217SJeff Kirsher 571adfc5217SJeff Kirsher out_unlock: 572adfc5217SJeff Kirsher spin_unlock(&priv->tx_lock); 573adfc5217SJeff Kirsher return ret; 574adfc5217SJeff Kirsher } 575adfc5217SJeff Kirsher 576adfc5217SJeff Kirsher /* 577adfc5217SJeff Kirsher * Change the interface's mac address. 578adfc5217SJeff Kirsher */ 579adfc5217SJeff Kirsher static int bcm_enet_set_mac_address(struct net_device *dev, void *p) 580adfc5217SJeff Kirsher { 581adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 582adfc5217SJeff Kirsher struct sockaddr *addr = p; 583adfc5217SJeff Kirsher u32 val; 584adfc5217SJeff Kirsher 585adfc5217SJeff Kirsher priv = netdev_priv(dev); 586adfc5217SJeff Kirsher memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 587adfc5217SJeff Kirsher 588adfc5217SJeff Kirsher /* use perfect match register 0 to store my mac address */ 589adfc5217SJeff Kirsher val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | 590adfc5217SJeff Kirsher (dev->dev_addr[4] << 8) | dev->dev_addr[5]; 591adfc5217SJeff Kirsher enet_writel(priv, val, ENET_PML_REG(0)); 592adfc5217SJeff Kirsher 593adfc5217SJeff Kirsher val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); 594adfc5217SJeff Kirsher val |= ENET_PMH_DATAVALID_MASK; 595adfc5217SJeff Kirsher enet_writel(priv, val, ENET_PMH_REG(0)); 596adfc5217SJeff Kirsher 597adfc5217SJeff Kirsher return 0; 598adfc5217SJeff Kirsher } 599adfc5217SJeff Kirsher 600adfc5217SJeff Kirsher /* 601adfc5217SJeff Kirsher * Change rx mode (promiscuous/allmulti) and update multicast list 602adfc5217SJeff Kirsher */ 603adfc5217SJeff Kirsher static void bcm_enet_set_multicast_list(struct net_device *dev) 604adfc5217SJeff Kirsher { 605adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 606adfc5217SJeff Kirsher struct netdev_hw_addr *ha; 607adfc5217SJeff Kirsher u32 val; 608adfc5217SJeff 
Kirsher int i; 609adfc5217SJeff Kirsher 610adfc5217SJeff Kirsher priv = netdev_priv(dev); 611adfc5217SJeff Kirsher 612adfc5217SJeff Kirsher val = enet_readl(priv, ENET_RXCFG_REG); 613adfc5217SJeff Kirsher 614adfc5217SJeff Kirsher if (dev->flags & IFF_PROMISC) 615adfc5217SJeff Kirsher val |= ENET_RXCFG_PROMISC_MASK; 616adfc5217SJeff Kirsher else 617adfc5217SJeff Kirsher val &= ~ENET_RXCFG_PROMISC_MASK; 618adfc5217SJeff Kirsher 619adfc5217SJeff Kirsher /* only 3 perfect match registers left, first one is used for 620adfc5217SJeff Kirsher * own mac address */ 621adfc5217SJeff Kirsher if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3) 622adfc5217SJeff Kirsher val |= ENET_RXCFG_ALLMCAST_MASK; 623adfc5217SJeff Kirsher else 624adfc5217SJeff Kirsher val &= ~ENET_RXCFG_ALLMCAST_MASK; 625adfc5217SJeff Kirsher 626adfc5217SJeff Kirsher /* no need to set perfect match registers if we catch all 627adfc5217SJeff Kirsher * multicast */ 628adfc5217SJeff Kirsher if (val & ENET_RXCFG_ALLMCAST_MASK) { 629adfc5217SJeff Kirsher enet_writel(priv, val, ENET_RXCFG_REG); 630adfc5217SJeff Kirsher return; 631adfc5217SJeff Kirsher } 632adfc5217SJeff Kirsher 633adfc5217SJeff Kirsher i = 0; 634adfc5217SJeff Kirsher netdev_for_each_mc_addr(ha, dev) { 635adfc5217SJeff Kirsher u8 *dmi_addr; 636adfc5217SJeff Kirsher u32 tmp; 637adfc5217SJeff Kirsher 638adfc5217SJeff Kirsher if (i == 3) 639adfc5217SJeff Kirsher break; 640adfc5217SJeff Kirsher /* update perfect match registers */ 641adfc5217SJeff Kirsher dmi_addr = ha->addr; 642adfc5217SJeff Kirsher tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | 643adfc5217SJeff Kirsher (dmi_addr[4] << 8) | dmi_addr[5]; 644adfc5217SJeff Kirsher enet_writel(priv, tmp, ENET_PML_REG(i + 1)); 645adfc5217SJeff Kirsher 646adfc5217SJeff Kirsher tmp = (dmi_addr[0] << 8 | dmi_addr[1]); 647adfc5217SJeff Kirsher tmp |= ENET_PMH_DATAVALID_MASK; 648adfc5217SJeff Kirsher enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1)); 649adfc5217SJeff Kirsher } 650adfc5217SJeff Kirsher 
	/* clear remaining perfect match slots so stale multicast
	 * addresses are no longer matched (slot 0 holds our own mac) */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 *
 * Full duplex is a single bit in the transmit control register.
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 *
 * @rx_en: honor received pause frames
 * @tx_en: generate pause frames (configured in the dma engine,
 *         keyed on our rx channel)
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 *
 * Mirrors phylib state (link/duplex/pause) into the mac registers,
 * caching the last-seen values in priv->old_* so hardware is only
 * touched on an actual change.
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overrided by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		/* pr_info without trailing newline: the line is finished
		 * with pr_cont below */
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 *
 * Forces mac duplex/flow-control from the priv->force_* / pause_*
 * settings and declares the carrier up.
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ?
"tx" : "off"); 777adfc5217SJeff Kirsher } 778adfc5217SJeff Kirsher 779adfc5217SJeff Kirsher /* 780adfc5217SJeff Kirsher * open callback, allocate dma rings & buffers and start rx operation 781adfc5217SJeff Kirsher */ 782adfc5217SJeff Kirsher static int bcm_enet_open(struct net_device *dev) 783adfc5217SJeff Kirsher { 784adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 785adfc5217SJeff Kirsher struct sockaddr addr; 786adfc5217SJeff Kirsher struct device *kdev; 787adfc5217SJeff Kirsher struct phy_device *phydev; 788adfc5217SJeff Kirsher int i, ret; 789adfc5217SJeff Kirsher unsigned int size; 790adfc5217SJeff Kirsher char phy_id[MII_BUS_ID_SIZE + 3]; 791adfc5217SJeff Kirsher void *p; 792adfc5217SJeff Kirsher u32 val; 793adfc5217SJeff Kirsher 794adfc5217SJeff Kirsher priv = netdev_priv(dev); 795adfc5217SJeff Kirsher kdev = &priv->pdev->dev; 796adfc5217SJeff Kirsher 797adfc5217SJeff Kirsher if (priv->has_phy) { 798adfc5217SJeff Kirsher /* connect to PHY */ 799adfc5217SJeff Kirsher snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 800c56e9e2aSFlorian Fainelli priv->mii_bus->id, priv->phy_id); 801adfc5217SJeff Kirsher 802adfc5217SJeff Kirsher phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0, 803adfc5217SJeff Kirsher PHY_INTERFACE_MODE_MII); 804adfc5217SJeff Kirsher 805adfc5217SJeff Kirsher if (IS_ERR(phydev)) { 806adfc5217SJeff Kirsher dev_err(kdev, "could not attach to PHY\n"); 807adfc5217SJeff Kirsher return PTR_ERR(phydev); 808adfc5217SJeff Kirsher } 809adfc5217SJeff Kirsher 810adfc5217SJeff Kirsher /* mask with MAC supported features */ 811adfc5217SJeff Kirsher phydev->supported &= (SUPPORTED_10baseT_Half | 812adfc5217SJeff Kirsher SUPPORTED_10baseT_Full | 813adfc5217SJeff Kirsher SUPPORTED_100baseT_Half | 814adfc5217SJeff Kirsher SUPPORTED_100baseT_Full | 815adfc5217SJeff Kirsher SUPPORTED_Autoneg | 816adfc5217SJeff Kirsher SUPPORTED_Pause | 817adfc5217SJeff Kirsher SUPPORTED_MII); 818adfc5217SJeff Kirsher phydev->advertising = phydev->supported; 
819adfc5217SJeff Kirsher 820adfc5217SJeff Kirsher if (priv->pause_auto && priv->pause_rx && priv->pause_tx) 821adfc5217SJeff Kirsher phydev->advertising |= SUPPORTED_Pause; 822adfc5217SJeff Kirsher else 823adfc5217SJeff Kirsher phydev->advertising &= ~SUPPORTED_Pause; 824adfc5217SJeff Kirsher 825adfc5217SJeff Kirsher dev_info(kdev, "attached PHY at address %d [%s]\n", 826adfc5217SJeff Kirsher phydev->addr, phydev->drv->name); 827adfc5217SJeff Kirsher 828adfc5217SJeff Kirsher priv->old_link = 0; 829adfc5217SJeff Kirsher priv->old_duplex = -1; 830adfc5217SJeff Kirsher priv->old_pause = -1; 831adfc5217SJeff Kirsher priv->phydev = phydev; 832adfc5217SJeff Kirsher } 833adfc5217SJeff Kirsher 834adfc5217SJeff Kirsher /* mask all interrupts and request them */ 835adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_IRMASK_REG); 836adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); 837adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); 838adfc5217SJeff Kirsher 839adfc5217SJeff Kirsher ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); 840adfc5217SJeff Kirsher if (ret) 841adfc5217SJeff Kirsher goto out_phy_disconnect; 842adfc5217SJeff Kirsher 843adfc5217SJeff Kirsher ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED, 844adfc5217SJeff Kirsher dev->name, dev); 845adfc5217SJeff Kirsher if (ret) 846adfc5217SJeff Kirsher goto out_freeirq; 847adfc5217SJeff Kirsher 848adfc5217SJeff Kirsher ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 849adfc5217SJeff Kirsher IRQF_DISABLED, dev->name, dev); 850adfc5217SJeff Kirsher if (ret) 851adfc5217SJeff Kirsher goto out_freeirq_rx; 852adfc5217SJeff Kirsher 853adfc5217SJeff Kirsher /* initialize perfect match registers */ 854adfc5217SJeff Kirsher for (i = 0; i < 4; i++) { 855adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_PML_REG(i)); 856adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_PMH_REG(i)); 857adfc5217SJeff Kirsher } 858adfc5217SJeff Kirsher 
859adfc5217SJeff Kirsher /* write device mac address */ 860adfc5217SJeff Kirsher memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); 861adfc5217SJeff Kirsher bcm_enet_set_mac_address(dev, &addr); 862adfc5217SJeff Kirsher 863adfc5217SJeff Kirsher /* allocate rx dma ring */ 864adfc5217SJeff Kirsher size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 865adfc5217SJeff Kirsher p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 866adfc5217SJeff Kirsher if (!p) { 867adfc5217SJeff Kirsher dev_err(kdev, "cannot allocate rx ring %u\n", size); 868adfc5217SJeff Kirsher ret = -ENOMEM; 869adfc5217SJeff Kirsher goto out_freeirq_tx; 870adfc5217SJeff Kirsher } 871adfc5217SJeff Kirsher 872adfc5217SJeff Kirsher memset(p, 0, size); 873adfc5217SJeff Kirsher priv->rx_desc_alloc_size = size; 874adfc5217SJeff Kirsher priv->rx_desc_cpu = p; 875adfc5217SJeff Kirsher 876adfc5217SJeff Kirsher /* allocate tx dma ring */ 877adfc5217SJeff Kirsher size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 878adfc5217SJeff Kirsher p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 879adfc5217SJeff Kirsher if (!p) { 880adfc5217SJeff Kirsher dev_err(kdev, "cannot allocate tx ring\n"); 881adfc5217SJeff Kirsher ret = -ENOMEM; 882adfc5217SJeff Kirsher goto out_free_rx_ring; 883adfc5217SJeff Kirsher } 884adfc5217SJeff Kirsher 885adfc5217SJeff Kirsher memset(p, 0, size); 886adfc5217SJeff Kirsher priv->tx_desc_alloc_size = size; 887adfc5217SJeff Kirsher priv->tx_desc_cpu = p; 888adfc5217SJeff Kirsher 889adfc5217SJeff Kirsher priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, 890adfc5217SJeff Kirsher GFP_KERNEL); 891adfc5217SJeff Kirsher if (!priv->tx_skb) { 892adfc5217SJeff Kirsher dev_err(kdev, "cannot allocate rx skb queue\n"); 893adfc5217SJeff Kirsher ret = -ENOMEM; 894adfc5217SJeff Kirsher goto out_free_tx_ring; 895adfc5217SJeff Kirsher } 896adfc5217SJeff Kirsher 897adfc5217SJeff Kirsher priv->tx_desc_count = priv->tx_ring_size; 898adfc5217SJeff 
Kirsher priv->tx_dirty_desc = 0; 899adfc5217SJeff Kirsher priv->tx_curr_desc = 0; 900adfc5217SJeff Kirsher spin_lock_init(&priv->tx_lock); 901adfc5217SJeff Kirsher 902adfc5217SJeff Kirsher /* init & fill rx ring with skbs */ 903adfc5217SJeff Kirsher priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, 904adfc5217SJeff Kirsher GFP_KERNEL); 905adfc5217SJeff Kirsher if (!priv->rx_skb) { 906adfc5217SJeff Kirsher dev_err(kdev, "cannot allocate rx skb queue\n"); 907adfc5217SJeff Kirsher ret = -ENOMEM; 908adfc5217SJeff Kirsher goto out_free_tx_skb; 909adfc5217SJeff Kirsher } 910adfc5217SJeff Kirsher 911adfc5217SJeff Kirsher priv->rx_desc_count = 0; 912adfc5217SJeff Kirsher priv->rx_dirty_desc = 0; 913adfc5217SJeff Kirsher priv->rx_curr_desc = 0; 914adfc5217SJeff Kirsher 915adfc5217SJeff Kirsher /* initialize flow control buffer allocation */ 916adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 917adfc5217SJeff Kirsher ENETDMA_BUFALLOC_REG(priv->rx_chan)); 918adfc5217SJeff Kirsher 919adfc5217SJeff Kirsher if (bcm_enet_refill_rx(dev)) { 920adfc5217SJeff Kirsher dev_err(kdev, "cannot allocate rx skb queue\n"); 921adfc5217SJeff Kirsher ret = -ENOMEM; 922adfc5217SJeff Kirsher goto out; 923adfc5217SJeff Kirsher } 924adfc5217SJeff Kirsher 925adfc5217SJeff Kirsher /* write rx & tx ring addresses */ 926adfc5217SJeff Kirsher enet_dma_writel(priv, priv->rx_desc_dma, 927adfc5217SJeff Kirsher ENETDMA_RSTART_REG(priv->rx_chan)); 928adfc5217SJeff Kirsher enet_dma_writel(priv, priv->tx_desc_dma, 929adfc5217SJeff Kirsher ENETDMA_RSTART_REG(priv->tx_chan)); 930adfc5217SJeff Kirsher 931adfc5217SJeff Kirsher /* clear remaining state ram for rx & tx channel */ 932adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); 933adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); 934adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); 935adfc5217SJeff Kirsher 
enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); 936adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); 937adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); 938adfc5217SJeff Kirsher 939adfc5217SJeff Kirsher /* set max rx/tx length */ 940adfc5217SJeff Kirsher enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); 941adfc5217SJeff Kirsher enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); 942adfc5217SJeff Kirsher 943adfc5217SJeff Kirsher /* set dma maximum burst len */ 944adfc5217SJeff Kirsher enet_dma_writel(priv, BCMENET_DMA_MAXBURST, 945adfc5217SJeff Kirsher ENETDMA_MAXBURST_REG(priv->rx_chan)); 946adfc5217SJeff Kirsher enet_dma_writel(priv, BCMENET_DMA_MAXBURST, 947adfc5217SJeff Kirsher ENETDMA_MAXBURST_REG(priv->tx_chan)); 948adfc5217SJeff Kirsher 949adfc5217SJeff Kirsher /* set correct transmit fifo watermark */ 950adfc5217SJeff Kirsher enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); 951adfc5217SJeff Kirsher 952adfc5217SJeff Kirsher /* set flow control low/high threshold to 1/3 / 2/3 */ 953adfc5217SJeff Kirsher val = priv->rx_ring_size / 3; 954adfc5217SJeff Kirsher enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); 955adfc5217SJeff Kirsher val = (priv->rx_ring_size * 2) / 3; 956adfc5217SJeff Kirsher enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); 957adfc5217SJeff Kirsher 958adfc5217SJeff Kirsher /* all set, enable mac and interrupts, start dma engine and 959adfc5217SJeff Kirsher * kick rx dma channel */ 960adfc5217SJeff Kirsher wmb(); 961adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 962adfc5217SJeff Kirsher val |= ENET_CTL_ENABLE_MASK; 963adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 964adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 965adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, 966adfc5217SJeff Kirsher ENETDMA_CHANCFG_REG(priv->rx_chan)); 967adfc5217SJeff Kirsher 
968adfc5217SJeff Kirsher /* watch "mib counters about to overflow" interrupt */ 969adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 970adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 971adfc5217SJeff Kirsher 972adfc5217SJeff Kirsher /* watch "packet transferred" interrupt in rx and tx */ 973adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 974adfc5217SJeff Kirsher ENETDMA_IR_REG(priv->rx_chan)); 975adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 976adfc5217SJeff Kirsher ENETDMA_IR_REG(priv->tx_chan)); 977adfc5217SJeff Kirsher 978adfc5217SJeff Kirsher /* make sure we enable napi before rx interrupt */ 979adfc5217SJeff Kirsher napi_enable(&priv->napi); 980adfc5217SJeff Kirsher 981adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 982adfc5217SJeff Kirsher ENETDMA_IRMASK_REG(priv->rx_chan)); 983adfc5217SJeff Kirsher enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, 984adfc5217SJeff Kirsher ENETDMA_IRMASK_REG(priv->tx_chan)); 985adfc5217SJeff Kirsher 986adfc5217SJeff Kirsher if (priv->has_phy) 987adfc5217SJeff Kirsher phy_start(priv->phydev); 988adfc5217SJeff Kirsher else 989adfc5217SJeff Kirsher bcm_enet_adjust_link(dev); 990adfc5217SJeff Kirsher 991adfc5217SJeff Kirsher netif_start_queue(dev); 992adfc5217SJeff Kirsher return 0; 993adfc5217SJeff Kirsher 994adfc5217SJeff Kirsher out: 995adfc5217SJeff Kirsher for (i = 0; i < priv->rx_ring_size; i++) { 996adfc5217SJeff Kirsher struct bcm_enet_desc *desc; 997adfc5217SJeff Kirsher 998adfc5217SJeff Kirsher if (!priv->rx_skb[i]) 999adfc5217SJeff Kirsher continue; 1000adfc5217SJeff Kirsher 1001adfc5217SJeff Kirsher desc = &priv->rx_desc_cpu[i]; 1002adfc5217SJeff Kirsher dma_unmap_single(kdev, desc->address, priv->rx_skb_size, 1003adfc5217SJeff Kirsher DMA_FROM_DEVICE); 1004adfc5217SJeff Kirsher kfree_skb(priv->rx_skb[i]); 1005adfc5217SJeff Kirsher } 1006adfc5217SJeff Kirsher kfree(priv->rx_skb); 1007adfc5217SJeff Kirsher 
1008adfc5217SJeff Kirsher out_free_tx_skb: 1009adfc5217SJeff Kirsher kfree(priv->tx_skb); 1010adfc5217SJeff Kirsher 1011adfc5217SJeff Kirsher out_free_tx_ring: 1012adfc5217SJeff Kirsher dma_free_coherent(kdev, priv->tx_desc_alloc_size, 1013adfc5217SJeff Kirsher priv->tx_desc_cpu, priv->tx_desc_dma); 1014adfc5217SJeff Kirsher 1015adfc5217SJeff Kirsher out_free_rx_ring: 1016adfc5217SJeff Kirsher dma_free_coherent(kdev, priv->rx_desc_alloc_size, 1017adfc5217SJeff Kirsher priv->rx_desc_cpu, priv->rx_desc_dma); 1018adfc5217SJeff Kirsher 1019adfc5217SJeff Kirsher out_freeirq_tx: 1020adfc5217SJeff Kirsher free_irq(priv->irq_tx, dev); 1021adfc5217SJeff Kirsher 1022adfc5217SJeff Kirsher out_freeirq_rx: 1023adfc5217SJeff Kirsher free_irq(priv->irq_rx, dev); 1024adfc5217SJeff Kirsher 1025adfc5217SJeff Kirsher out_freeirq: 1026adfc5217SJeff Kirsher free_irq(dev->irq, dev); 1027adfc5217SJeff Kirsher 1028adfc5217SJeff Kirsher out_phy_disconnect: 1029adfc5217SJeff Kirsher phy_disconnect(priv->phydev); 1030adfc5217SJeff Kirsher 1031adfc5217SJeff Kirsher return ret; 1032adfc5217SJeff Kirsher } 1033adfc5217SJeff Kirsher 1034adfc5217SJeff Kirsher /* 1035adfc5217SJeff Kirsher * disable mac 1036adfc5217SJeff Kirsher */ 1037adfc5217SJeff Kirsher static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) 1038adfc5217SJeff Kirsher { 1039adfc5217SJeff Kirsher int limit; 1040adfc5217SJeff Kirsher u32 val; 1041adfc5217SJeff Kirsher 1042adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1043adfc5217SJeff Kirsher val |= ENET_CTL_DISABLE_MASK; 1044adfc5217SJeff Kirsher enet_writel(priv, val, ENET_CTL_REG); 1045adfc5217SJeff Kirsher 1046adfc5217SJeff Kirsher limit = 1000; 1047adfc5217SJeff Kirsher do { 1048adfc5217SJeff Kirsher u32 val; 1049adfc5217SJeff Kirsher 1050adfc5217SJeff Kirsher val = enet_readl(priv, ENET_CTL_REG); 1051adfc5217SJeff Kirsher if (!(val & ENET_CTL_DISABLE_MASK)) 1052adfc5217SJeff Kirsher break; 1053adfc5217SJeff Kirsher udelay(1); 1054adfc5217SJeff Kirsher } 
while (limit--); 1055adfc5217SJeff Kirsher } 1056adfc5217SJeff Kirsher 1057adfc5217SJeff Kirsher /* 1058adfc5217SJeff Kirsher * disable dma in given channel 1059adfc5217SJeff Kirsher */ 1060adfc5217SJeff Kirsher static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) 1061adfc5217SJeff Kirsher { 1062adfc5217SJeff Kirsher int limit; 1063adfc5217SJeff Kirsher 1064adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); 1065adfc5217SJeff Kirsher 1066adfc5217SJeff Kirsher limit = 1000; 1067adfc5217SJeff Kirsher do { 1068adfc5217SJeff Kirsher u32 val; 1069adfc5217SJeff Kirsher 1070adfc5217SJeff Kirsher val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); 1071adfc5217SJeff Kirsher if (!(val & ENETDMA_CHANCFG_EN_MASK)) 1072adfc5217SJeff Kirsher break; 1073adfc5217SJeff Kirsher udelay(1); 1074adfc5217SJeff Kirsher } while (limit--); 1075adfc5217SJeff Kirsher } 1076adfc5217SJeff Kirsher 1077adfc5217SJeff Kirsher /* 1078adfc5217SJeff Kirsher * stop callback 1079adfc5217SJeff Kirsher */ 1080adfc5217SJeff Kirsher static int bcm_enet_stop(struct net_device *dev) 1081adfc5217SJeff Kirsher { 1082adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1083adfc5217SJeff Kirsher struct device *kdev; 1084adfc5217SJeff Kirsher int i; 1085adfc5217SJeff Kirsher 1086adfc5217SJeff Kirsher priv = netdev_priv(dev); 1087adfc5217SJeff Kirsher kdev = &priv->pdev->dev; 1088adfc5217SJeff Kirsher 1089adfc5217SJeff Kirsher netif_stop_queue(dev); 1090adfc5217SJeff Kirsher napi_disable(&priv->napi); 1091adfc5217SJeff Kirsher if (priv->has_phy) 1092adfc5217SJeff Kirsher phy_stop(priv->phydev); 1093adfc5217SJeff Kirsher del_timer_sync(&priv->rx_timeout); 1094adfc5217SJeff Kirsher 1095adfc5217SJeff Kirsher /* mask all interrupts */ 1096adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_IRMASK_REG); 1097adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); 1098adfc5217SJeff Kirsher enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); 
	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * ethtool callbacks
 */

/* describes one exported statistic: its ethtool name, the size and
 * offset of the backing field, and the hardware mib register it is
 * accumulated from (-1 for pure software net_device_stats fields) */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

/* expand to the (size, offset) pair of a field in bcm_enet_priv /
 * net_device_stats respectively */
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),		\
		     offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	/* software counters kept in net_device_stats (no mib register) */
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	/* hardware rx mib counters */
	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	/* hardware tx mib counters */
	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN	\
	(sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))

/* mib registers not exported through ethtool; still read on each mib
 * refresh so the "about to overflow" interrupt condition is cleared */
static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};
1225adfc5217SJeff Kirsher 1226adfc5217SJeff Kirsher 1227adfc5217SJeff Kirsher static void bcm_enet_get_drvinfo(struct net_device *netdev, 1228adfc5217SJeff Kirsher struct ethtool_drvinfo *drvinfo) 1229adfc5217SJeff Kirsher { 1230adfc5217SJeff Kirsher strncpy(drvinfo->driver, bcm_enet_driver_name, 32); 1231adfc5217SJeff Kirsher strncpy(drvinfo->version, bcm_enet_driver_version, 32); 1232adfc5217SJeff Kirsher strncpy(drvinfo->fw_version, "N/A", 32); 1233adfc5217SJeff Kirsher strncpy(drvinfo->bus_info, "bcm63xx", 32); 1234adfc5217SJeff Kirsher drvinfo->n_stats = BCM_ENET_STATS_LEN; 1235adfc5217SJeff Kirsher } 1236adfc5217SJeff Kirsher 1237adfc5217SJeff Kirsher static int bcm_enet_get_sset_count(struct net_device *netdev, 1238adfc5217SJeff Kirsher int string_set) 1239adfc5217SJeff Kirsher { 1240adfc5217SJeff Kirsher switch (string_set) { 1241adfc5217SJeff Kirsher case ETH_SS_STATS: 1242adfc5217SJeff Kirsher return BCM_ENET_STATS_LEN; 1243adfc5217SJeff Kirsher default: 1244adfc5217SJeff Kirsher return -EINVAL; 1245adfc5217SJeff Kirsher } 1246adfc5217SJeff Kirsher } 1247adfc5217SJeff Kirsher 1248adfc5217SJeff Kirsher static void bcm_enet_get_strings(struct net_device *netdev, 1249adfc5217SJeff Kirsher u32 stringset, u8 *data) 1250adfc5217SJeff Kirsher { 1251adfc5217SJeff Kirsher int i; 1252adfc5217SJeff Kirsher 1253adfc5217SJeff Kirsher switch (stringset) { 1254adfc5217SJeff Kirsher case ETH_SS_STATS: 1255adfc5217SJeff Kirsher for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1256adfc5217SJeff Kirsher memcpy(data + i * ETH_GSTRING_LEN, 1257adfc5217SJeff Kirsher bcm_enet_gstrings_stats[i].stat_string, 1258adfc5217SJeff Kirsher ETH_GSTRING_LEN); 1259adfc5217SJeff Kirsher } 1260adfc5217SJeff Kirsher break; 1261adfc5217SJeff Kirsher } 1262adfc5217SJeff Kirsher } 1263adfc5217SJeff Kirsher 1264adfc5217SJeff Kirsher static void update_mib_counters(struct bcm_enet_priv *priv) 1265adfc5217SJeff Kirsher { 1266adfc5217SJeff Kirsher int i; 1267adfc5217SJeff Kirsher 1268adfc5217SJeff 
Kirsher for (i = 0; i < BCM_ENET_STATS_LEN; i++) { 1269adfc5217SJeff Kirsher const struct bcm_enet_stats *s; 1270adfc5217SJeff Kirsher u32 val; 1271adfc5217SJeff Kirsher char *p; 1272adfc5217SJeff Kirsher 1273adfc5217SJeff Kirsher s = &bcm_enet_gstrings_stats[i]; 1274adfc5217SJeff Kirsher if (s->mib_reg == -1) 1275adfc5217SJeff Kirsher continue; 1276adfc5217SJeff Kirsher 1277adfc5217SJeff Kirsher val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); 1278adfc5217SJeff Kirsher p = (char *)priv + s->stat_offset; 1279adfc5217SJeff Kirsher 1280adfc5217SJeff Kirsher if (s->sizeof_stat == sizeof(u64)) 1281adfc5217SJeff Kirsher *(u64 *)p += val; 1282adfc5217SJeff Kirsher else 1283adfc5217SJeff Kirsher *(u32 *)p += val; 1284adfc5217SJeff Kirsher } 1285adfc5217SJeff Kirsher 1286adfc5217SJeff Kirsher /* also empty unused mib counters to make sure mib counter 1287adfc5217SJeff Kirsher * overflow interrupt is cleared */ 1288adfc5217SJeff Kirsher for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++) 1289adfc5217SJeff Kirsher (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); 1290adfc5217SJeff Kirsher } 1291adfc5217SJeff Kirsher 1292adfc5217SJeff Kirsher static void bcm_enet_update_mib_counters_defer(struct work_struct *t) 1293adfc5217SJeff Kirsher { 1294adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1295adfc5217SJeff Kirsher 1296adfc5217SJeff Kirsher priv = container_of(t, struct bcm_enet_priv, mib_update_task); 1297adfc5217SJeff Kirsher mutex_lock(&priv->mib_update_lock); 1298adfc5217SJeff Kirsher update_mib_counters(priv); 1299adfc5217SJeff Kirsher mutex_unlock(&priv->mib_update_lock); 1300adfc5217SJeff Kirsher 1301adfc5217SJeff Kirsher /* reenable mib interrupt */ 1302adfc5217SJeff Kirsher if (netif_running(priv->net_dev)) 1303adfc5217SJeff Kirsher enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1304adfc5217SJeff Kirsher } 1305adfc5217SJeff Kirsher 1306adfc5217SJeff Kirsher static void bcm_enet_get_ethtool_stats(struct net_device *netdev, 1307adfc5217SJeff Kirsher struct 
				       ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	/* refresh mib counters under the lock so the snapshot written
	 * to data[] is consistent */
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		/* mib_reg == -1 marks a net_device_stats field; others
		 * live in bcm_enet_priv */
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	cmd->maxrxpkt = 0;
	cmd->maxtxpkt = 0;

	if (priv->has_phy) {
		/* with a phy attached, phylib owns the link settings */
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_gset(priv->phydev, cmd);
	} else {
		/* no phy: report the forced settings */
		cmd->autoneg = 0;
		ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
					    ? SPEED_100 : SPEED_10));
		cmd->duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd->supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		cmd->advertising = 0;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
	}
	return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_sset(priv->phydev, cmd);
	} else {

		/* without a phy only forced 10/100 over MII is valid */
		if (cmd->autoneg ||
		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
		    cmd->port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

		/* apply immediately if the interface is up */
		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	/* rings are sized at open time, so bounce the interface to
	 * apply the new sizes */
	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
1429adfc5217SJeff Kirsher bcm_enet_set_multicast_list(dev); 1430adfc5217SJeff Kirsher } 1431adfc5217SJeff Kirsher return 0; 1432adfc5217SJeff Kirsher } 1433adfc5217SJeff Kirsher 1434adfc5217SJeff Kirsher static void bcm_enet_get_pauseparam(struct net_device *dev, 1435adfc5217SJeff Kirsher struct ethtool_pauseparam *ecmd) 1436adfc5217SJeff Kirsher { 1437adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1438adfc5217SJeff Kirsher 1439adfc5217SJeff Kirsher priv = netdev_priv(dev); 1440adfc5217SJeff Kirsher ecmd->autoneg = priv->pause_auto; 1441adfc5217SJeff Kirsher ecmd->rx_pause = priv->pause_rx; 1442adfc5217SJeff Kirsher ecmd->tx_pause = priv->pause_tx; 1443adfc5217SJeff Kirsher } 1444adfc5217SJeff Kirsher 1445adfc5217SJeff Kirsher static int bcm_enet_set_pauseparam(struct net_device *dev, 1446adfc5217SJeff Kirsher struct ethtool_pauseparam *ecmd) 1447adfc5217SJeff Kirsher { 1448adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1449adfc5217SJeff Kirsher 1450adfc5217SJeff Kirsher priv = netdev_priv(dev); 1451adfc5217SJeff Kirsher 1452adfc5217SJeff Kirsher if (priv->has_phy) { 1453adfc5217SJeff Kirsher if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { 1454adfc5217SJeff Kirsher /* asymetric pause mode not supported, 1455adfc5217SJeff Kirsher * actually possible but integrated PHY has RO 1456adfc5217SJeff Kirsher * asym_pause bit */ 1457adfc5217SJeff Kirsher return -EINVAL; 1458adfc5217SJeff Kirsher } 1459adfc5217SJeff Kirsher } else { 1460adfc5217SJeff Kirsher /* no pause autoneg on direct mii connection */ 1461adfc5217SJeff Kirsher if (ecmd->autoneg) 1462adfc5217SJeff Kirsher return -EINVAL; 1463adfc5217SJeff Kirsher } 1464adfc5217SJeff Kirsher 1465adfc5217SJeff Kirsher priv->pause_auto = ecmd->autoneg; 1466adfc5217SJeff Kirsher priv->pause_rx = ecmd->rx_pause; 1467adfc5217SJeff Kirsher priv->pause_tx = ecmd->tx_pause; 1468adfc5217SJeff Kirsher 1469adfc5217SJeff Kirsher return 0; 1470adfc5217SJeff Kirsher } 1471adfc5217SJeff Kirsher 14721aff0cbeSstephen 
hemminger static const struct ethtool_ops bcm_enet_ethtool_ops = { 1473adfc5217SJeff Kirsher .get_strings = bcm_enet_get_strings, 1474adfc5217SJeff Kirsher .get_sset_count = bcm_enet_get_sset_count, 1475adfc5217SJeff Kirsher .get_ethtool_stats = bcm_enet_get_ethtool_stats, 1476adfc5217SJeff Kirsher .get_settings = bcm_enet_get_settings, 1477adfc5217SJeff Kirsher .set_settings = bcm_enet_set_settings, 1478adfc5217SJeff Kirsher .get_drvinfo = bcm_enet_get_drvinfo, 1479adfc5217SJeff Kirsher .get_link = ethtool_op_get_link, 1480adfc5217SJeff Kirsher .get_ringparam = bcm_enet_get_ringparam, 1481adfc5217SJeff Kirsher .set_ringparam = bcm_enet_set_ringparam, 1482adfc5217SJeff Kirsher .get_pauseparam = bcm_enet_get_pauseparam, 1483adfc5217SJeff Kirsher .set_pauseparam = bcm_enet_set_pauseparam, 1484adfc5217SJeff Kirsher }; 1485adfc5217SJeff Kirsher 1486adfc5217SJeff Kirsher static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1487adfc5217SJeff Kirsher { 1488adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1489adfc5217SJeff Kirsher 1490adfc5217SJeff Kirsher priv = netdev_priv(dev); 1491adfc5217SJeff Kirsher if (priv->has_phy) { 1492adfc5217SJeff Kirsher if (!priv->phydev) 1493adfc5217SJeff Kirsher return -ENODEV; 1494adfc5217SJeff Kirsher return phy_mii_ioctl(priv->phydev, rq, cmd); 1495adfc5217SJeff Kirsher } else { 1496adfc5217SJeff Kirsher struct mii_if_info mii; 1497adfc5217SJeff Kirsher 1498adfc5217SJeff Kirsher mii.dev = dev; 1499adfc5217SJeff Kirsher mii.mdio_read = bcm_enet_mdio_read_mii; 1500adfc5217SJeff Kirsher mii.mdio_write = bcm_enet_mdio_write_mii; 1501adfc5217SJeff Kirsher mii.phy_id = 0; 1502adfc5217SJeff Kirsher mii.phy_id_mask = 0x3f; 1503adfc5217SJeff Kirsher mii.reg_num_mask = 0x1f; 1504adfc5217SJeff Kirsher return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); 1505adfc5217SJeff Kirsher } 1506adfc5217SJeff Kirsher } 1507adfc5217SJeff Kirsher 1508adfc5217SJeff Kirsher /* 1509adfc5217SJeff Kirsher * calculate actual hardware mtu 
 */
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
	int actual_mtu;

	actual_mtu = mtu;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/* reject frames below the 64-byte ethernet minimum or above
	 * the controller limit */
	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
		return -EINVAL;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  BCMENET_DMA_MAXBURST * 4);
	return 0;
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	/* rings are sized for the current mtu; refuse changes while up */
	if (netif_running(dev))
		return -EBUSY;

	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
	if (ret)
		return ret;

	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	/* poll for reset completion, up to ~1000us */
	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
Kirsher val |= ENET_MIBCTL_RDCLEAR_MASK; 1596adfc5217SJeff Kirsher enet_writel(priv, val, ENET_MIBCTL_REG); 1597adfc5217SJeff Kirsher } 1598adfc5217SJeff Kirsher 1599adfc5217SJeff Kirsher static const struct net_device_ops bcm_enet_ops = { 1600adfc5217SJeff Kirsher .ndo_open = bcm_enet_open, 1601adfc5217SJeff Kirsher .ndo_stop = bcm_enet_stop, 1602adfc5217SJeff Kirsher .ndo_start_xmit = bcm_enet_start_xmit, 1603adfc5217SJeff Kirsher .ndo_set_mac_address = bcm_enet_set_mac_address, 1604afc4b13dSJiri Pirko .ndo_set_rx_mode = bcm_enet_set_multicast_list, 1605adfc5217SJeff Kirsher .ndo_do_ioctl = bcm_enet_ioctl, 1606adfc5217SJeff Kirsher .ndo_change_mtu = bcm_enet_change_mtu, 1607adfc5217SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 1608adfc5217SJeff Kirsher .ndo_poll_controller = bcm_enet_netpoll, 1609adfc5217SJeff Kirsher #endif 1610adfc5217SJeff Kirsher }; 1611adfc5217SJeff Kirsher 1612adfc5217SJeff Kirsher /* 1613adfc5217SJeff Kirsher * allocate netdevice, request register memory and register device. 1614adfc5217SJeff Kirsher */ 1615047fc566SBill Pemberton static int bcm_enet_probe(struct platform_device *pdev) 1616adfc5217SJeff Kirsher { 1617adfc5217SJeff Kirsher struct bcm_enet_priv *priv; 1618adfc5217SJeff Kirsher struct net_device *dev; 1619adfc5217SJeff Kirsher struct bcm63xx_enet_platform_data *pd; 1620adfc5217SJeff Kirsher struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; 1621adfc5217SJeff Kirsher struct mii_bus *bus; 1622adfc5217SJeff Kirsher const char *clk_name; 1623adfc5217SJeff Kirsher unsigned int iomem_size; 1624adfc5217SJeff Kirsher int i, ret; 1625adfc5217SJeff Kirsher 1626adfc5217SJeff Kirsher /* stop if shared driver failed, assume driver->probe will be 1627adfc5217SJeff Kirsher * called in the same order we register devices (correct ?) 
*/ 1628adfc5217SJeff Kirsher if (!bcm_enet_shared_base) 1629adfc5217SJeff Kirsher return -ENODEV; 1630adfc5217SJeff Kirsher 1631adfc5217SJeff Kirsher res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1632adfc5217SJeff Kirsher res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1633adfc5217SJeff Kirsher res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 1634adfc5217SJeff Kirsher res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); 1635adfc5217SJeff Kirsher if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) 1636adfc5217SJeff Kirsher return -ENODEV; 1637adfc5217SJeff Kirsher 1638adfc5217SJeff Kirsher ret = 0; 1639adfc5217SJeff Kirsher dev = alloc_etherdev(sizeof(*priv)); 1640adfc5217SJeff Kirsher if (!dev) 1641adfc5217SJeff Kirsher return -ENOMEM; 1642adfc5217SJeff Kirsher priv = netdev_priv(dev); 1643adfc5217SJeff Kirsher 1644adfc5217SJeff Kirsher ret = compute_hw_mtu(priv, dev->mtu); 1645adfc5217SJeff Kirsher if (ret) 1646adfc5217SJeff Kirsher goto out; 1647adfc5217SJeff Kirsher 1648adfc5217SJeff Kirsher iomem_size = resource_size(res_mem); 1649adfc5217SJeff Kirsher if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) { 1650adfc5217SJeff Kirsher ret = -EBUSY; 1651adfc5217SJeff Kirsher goto out; 1652adfc5217SJeff Kirsher } 1653adfc5217SJeff Kirsher 1654adfc5217SJeff Kirsher priv->base = ioremap(res_mem->start, iomem_size); 1655adfc5217SJeff Kirsher if (priv->base == NULL) { 1656adfc5217SJeff Kirsher ret = -ENOMEM; 1657adfc5217SJeff Kirsher goto out_release_mem; 1658adfc5217SJeff Kirsher } 1659adfc5217SJeff Kirsher dev->irq = priv->irq = res_irq->start; 1660adfc5217SJeff Kirsher priv->irq_rx = res_irq_rx->start; 1661adfc5217SJeff Kirsher priv->irq_tx = res_irq_tx->start; 1662adfc5217SJeff Kirsher priv->mac_id = pdev->id; 1663adfc5217SJeff Kirsher 1664adfc5217SJeff Kirsher /* get rx & tx dma channel id for this mac */ 1665adfc5217SJeff Kirsher if (priv->mac_id == 0) { 1666adfc5217SJeff Kirsher priv->rx_chan = 0; 
1667adfc5217SJeff Kirsher priv->tx_chan = 1; 1668adfc5217SJeff Kirsher clk_name = "enet0"; 1669adfc5217SJeff Kirsher } else { 1670adfc5217SJeff Kirsher priv->rx_chan = 2; 1671adfc5217SJeff Kirsher priv->tx_chan = 3; 1672adfc5217SJeff Kirsher clk_name = "enet1"; 1673adfc5217SJeff Kirsher } 1674adfc5217SJeff Kirsher 1675adfc5217SJeff Kirsher priv->mac_clk = clk_get(&pdev->dev, clk_name); 1676adfc5217SJeff Kirsher if (IS_ERR(priv->mac_clk)) { 1677adfc5217SJeff Kirsher ret = PTR_ERR(priv->mac_clk); 1678adfc5217SJeff Kirsher goto out_unmap; 1679adfc5217SJeff Kirsher } 1680adfc5217SJeff Kirsher clk_enable(priv->mac_clk); 1681adfc5217SJeff Kirsher 1682adfc5217SJeff Kirsher /* initialize default and fetch platform data */ 1683adfc5217SJeff Kirsher priv->rx_ring_size = BCMENET_DEF_RX_DESC; 1684adfc5217SJeff Kirsher priv->tx_ring_size = BCMENET_DEF_TX_DESC; 1685adfc5217SJeff Kirsher 1686adfc5217SJeff Kirsher pd = pdev->dev.platform_data; 1687adfc5217SJeff Kirsher if (pd) { 1688adfc5217SJeff Kirsher memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 1689adfc5217SJeff Kirsher priv->has_phy = pd->has_phy; 1690adfc5217SJeff Kirsher priv->phy_id = pd->phy_id; 1691adfc5217SJeff Kirsher priv->has_phy_interrupt = pd->has_phy_interrupt; 1692adfc5217SJeff Kirsher priv->phy_interrupt = pd->phy_interrupt; 1693adfc5217SJeff Kirsher priv->use_external_mii = !pd->use_internal_phy; 1694adfc5217SJeff Kirsher priv->pause_auto = pd->pause_auto; 1695adfc5217SJeff Kirsher priv->pause_rx = pd->pause_rx; 1696adfc5217SJeff Kirsher priv->pause_tx = pd->pause_tx; 1697adfc5217SJeff Kirsher priv->force_duplex_full = pd->force_duplex_full; 1698adfc5217SJeff Kirsher priv->force_speed_100 = pd->force_speed_100; 1699adfc5217SJeff Kirsher } 1700adfc5217SJeff Kirsher 1701adfc5217SJeff Kirsher if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { 1702adfc5217SJeff Kirsher /* using internal PHY, enable clock */ 1703adfc5217SJeff Kirsher priv->phy_clk = clk_get(&pdev->dev, "ephy"); 1704adfc5217SJeff 
Kirsher if (IS_ERR(priv->phy_clk)) { 1705adfc5217SJeff Kirsher ret = PTR_ERR(priv->phy_clk); 1706adfc5217SJeff Kirsher priv->phy_clk = NULL; 1707adfc5217SJeff Kirsher goto out_put_clk_mac; 1708adfc5217SJeff Kirsher } 1709adfc5217SJeff Kirsher clk_enable(priv->phy_clk); 1710adfc5217SJeff Kirsher } 1711adfc5217SJeff Kirsher 1712adfc5217SJeff Kirsher /* do minimal hardware init to be able to probe mii bus */ 1713adfc5217SJeff Kirsher bcm_enet_hw_preinit(priv); 1714adfc5217SJeff Kirsher 1715adfc5217SJeff Kirsher /* MII bus registration */ 1716adfc5217SJeff Kirsher if (priv->has_phy) { 1717adfc5217SJeff Kirsher 1718adfc5217SJeff Kirsher priv->mii_bus = mdiobus_alloc(); 1719adfc5217SJeff Kirsher if (!priv->mii_bus) { 1720adfc5217SJeff Kirsher ret = -ENOMEM; 1721adfc5217SJeff Kirsher goto out_uninit_hw; 1722adfc5217SJeff Kirsher } 1723adfc5217SJeff Kirsher 1724adfc5217SJeff Kirsher bus = priv->mii_bus; 1725adfc5217SJeff Kirsher bus->name = "bcm63xx_enet MII bus"; 1726adfc5217SJeff Kirsher bus->parent = &pdev->dev; 1727adfc5217SJeff Kirsher bus->priv = priv; 1728adfc5217SJeff Kirsher bus->read = bcm_enet_mdio_read_phylib; 1729adfc5217SJeff Kirsher bus->write = bcm_enet_mdio_write_phylib; 17303e617506SFlorian Fainelli sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id); 1731adfc5217SJeff Kirsher 1732adfc5217SJeff Kirsher /* only probe bus where we think the PHY is, because 1733adfc5217SJeff Kirsher * the mdio read operation return 0 instead of 0xffff 1734adfc5217SJeff Kirsher * if a slave is not present on hw */ 1735adfc5217SJeff Kirsher bus->phy_mask = ~(1 << priv->phy_id); 1736adfc5217SJeff Kirsher 1737adfc5217SJeff Kirsher bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 1738adfc5217SJeff Kirsher if (!bus->irq) { 1739adfc5217SJeff Kirsher ret = -ENOMEM; 1740adfc5217SJeff Kirsher goto out_free_mdio; 1741adfc5217SJeff Kirsher } 1742adfc5217SJeff Kirsher 1743adfc5217SJeff Kirsher if (priv->has_phy_interrupt) 1744adfc5217SJeff Kirsher bus->irq[priv->phy_id] = 
priv->phy_interrupt; 1745adfc5217SJeff Kirsher else 1746adfc5217SJeff Kirsher bus->irq[priv->phy_id] = PHY_POLL; 1747adfc5217SJeff Kirsher 1748adfc5217SJeff Kirsher ret = mdiobus_register(bus); 1749adfc5217SJeff Kirsher if (ret) { 1750adfc5217SJeff Kirsher dev_err(&pdev->dev, "unable to register mdio bus\n"); 1751adfc5217SJeff Kirsher goto out_free_mdio; 1752adfc5217SJeff Kirsher } 1753adfc5217SJeff Kirsher } else { 1754adfc5217SJeff Kirsher 1755adfc5217SJeff Kirsher /* run platform code to initialize PHY device */ 1756adfc5217SJeff Kirsher if (pd->mii_config && 1757adfc5217SJeff Kirsher pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, 1758adfc5217SJeff Kirsher bcm_enet_mdio_write_mii)) { 1759adfc5217SJeff Kirsher dev_err(&pdev->dev, "unable to configure mdio bus\n"); 1760adfc5217SJeff Kirsher goto out_uninit_hw; 1761adfc5217SJeff Kirsher } 1762adfc5217SJeff Kirsher } 1763adfc5217SJeff Kirsher 1764adfc5217SJeff Kirsher spin_lock_init(&priv->rx_lock); 1765adfc5217SJeff Kirsher 1766adfc5217SJeff Kirsher /* init rx timeout (used for oom) */ 1767adfc5217SJeff Kirsher init_timer(&priv->rx_timeout); 1768adfc5217SJeff Kirsher priv->rx_timeout.function = bcm_enet_refill_rx_timer; 1769adfc5217SJeff Kirsher priv->rx_timeout.data = (unsigned long)dev; 1770adfc5217SJeff Kirsher 1771adfc5217SJeff Kirsher /* init the mib update lock&work */ 1772adfc5217SJeff Kirsher mutex_init(&priv->mib_update_lock); 1773adfc5217SJeff Kirsher INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); 1774adfc5217SJeff Kirsher 1775adfc5217SJeff Kirsher /* zero mib counters */ 1776adfc5217SJeff Kirsher for (i = 0; i < ENET_MIB_REG_COUNT; i++) 1777adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_MIB_REG(i)); 1778adfc5217SJeff Kirsher 1779adfc5217SJeff Kirsher /* register netdevice */ 1780adfc5217SJeff Kirsher dev->netdev_ops = &bcm_enet_ops; 1781adfc5217SJeff Kirsher netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 1782adfc5217SJeff Kirsher 1783adfc5217SJeff Kirsher 
SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); 1784adfc5217SJeff Kirsher SET_NETDEV_DEV(dev, &pdev->dev); 1785adfc5217SJeff Kirsher 1786adfc5217SJeff Kirsher ret = register_netdev(dev); 1787adfc5217SJeff Kirsher if (ret) 1788adfc5217SJeff Kirsher goto out_unregister_mdio; 1789adfc5217SJeff Kirsher 1790adfc5217SJeff Kirsher netif_carrier_off(dev); 1791adfc5217SJeff Kirsher platform_set_drvdata(pdev, dev); 1792adfc5217SJeff Kirsher priv->pdev = pdev; 1793adfc5217SJeff Kirsher priv->net_dev = dev; 1794adfc5217SJeff Kirsher 1795adfc5217SJeff Kirsher return 0; 1796adfc5217SJeff Kirsher 1797adfc5217SJeff Kirsher out_unregister_mdio: 1798adfc5217SJeff Kirsher if (priv->mii_bus) { 1799adfc5217SJeff Kirsher mdiobus_unregister(priv->mii_bus); 1800adfc5217SJeff Kirsher kfree(priv->mii_bus->irq); 1801adfc5217SJeff Kirsher } 1802adfc5217SJeff Kirsher 1803adfc5217SJeff Kirsher out_free_mdio: 1804adfc5217SJeff Kirsher if (priv->mii_bus) 1805adfc5217SJeff Kirsher mdiobus_free(priv->mii_bus); 1806adfc5217SJeff Kirsher 1807adfc5217SJeff Kirsher out_uninit_hw: 1808adfc5217SJeff Kirsher /* turn off mdc clock */ 1809adfc5217SJeff Kirsher enet_writel(priv, 0, ENET_MIISC_REG); 1810adfc5217SJeff Kirsher if (priv->phy_clk) { 1811adfc5217SJeff Kirsher clk_disable(priv->phy_clk); 1812adfc5217SJeff Kirsher clk_put(priv->phy_clk); 1813adfc5217SJeff Kirsher } 1814adfc5217SJeff Kirsher 1815adfc5217SJeff Kirsher out_put_clk_mac: 1816adfc5217SJeff Kirsher clk_disable(priv->mac_clk); 1817adfc5217SJeff Kirsher clk_put(priv->mac_clk); 1818adfc5217SJeff Kirsher 1819adfc5217SJeff Kirsher out_unmap: 1820adfc5217SJeff Kirsher iounmap(priv->base); 1821adfc5217SJeff Kirsher 1822adfc5217SJeff Kirsher out_release_mem: 1823adfc5217SJeff Kirsher release_mem_region(res_mem->start, iomem_size); 1824adfc5217SJeff Kirsher out: 1825adfc5217SJeff Kirsher free_netdev(dev); 1826adfc5217SJeff Kirsher return ret; 1827adfc5217SJeff Kirsher } 1828adfc5217SJeff Kirsher 1829adfc5217SJeff Kirsher 1830adfc5217SJeff Kirsher /* 
 * exit func, stops hardware and unregisters netdevice
 */
static int bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct resource *res;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = pdev->dev.platform_data;
		/* let platform code undo its PHY setup (enable arg 0) */
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* release device resources */
	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	/* disable hw block clocks */
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner  = THIS_MODULE,
	},
};

/*
 * reserve & remap memory space shared between all macs
 */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	unsigned int iomem_size;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	iomem_size = resource_size(res);
	if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
		return -EBUSY;

	bcm_enet_shared_base = ioremap(res->start, iomem_size);
	if (!bcm_enet_shared_base) {
		release_mem_region(res->start, iomem_size);
		return -ENOMEM;
	}
	return 0;
}

static int bcm_enet_shared_remove(struct platform_device *pdev)
{
	struct resource *res;

	/* unmap and release the shared DMA register window */
	iounmap(bcm_enet_shared_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	return 0;
}

/*
 * this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.remove	= bcm_enet_shared_remove,
	.driver	= {
		.name	= "bcm63xx_enet_shared",
		.owner  = THIS_MODULE,
	},
};

/*
 * entry point
 */
static int __init bcm_enet_init(void)
{
	int ret;

	/* the shared (DMA) driver must be registered before any mac
	 * probe runs, since probe bails out without
	 * bcm_enet_shared_base */
	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&bcm63xx_enet_driver);
	if (ret)
		platform_driver_unregister(&bcm63xx_enet_shared_driver);

	return ret;
}

static void __exit bcm_enet_exit(void)
{
	platform_driver_unregister(&bcm63xx_enet_driver);
	platform_driver_unregister(&bcm63xx_enet_shared_driver);
}


module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");