1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2e4f2379dSAlexey Brodkin /* 3e4f2379dSAlexey Brodkin * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) 4e4f2379dSAlexey Brodkin * 5e4f2379dSAlexey Brodkin * Driver for the ARC EMAC 10100 (hardware revision 5) 6e4f2379dSAlexey Brodkin * 7e4f2379dSAlexey Brodkin * Contributors: 8e4f2379dSAlexey Brodkin * Amit Bhor 9e4f2379dSAlexey Brodkin * Sameer Dhavale 10e4f2379dSAlexey Brodkin * Vineet Gupta 11e4f2379dSAlexey Brodkin */ 12e4f2379dSAlexey Brodkin 13775dd682SBeniamino Galvani #include <linux/crc32.h> 14e4f2379dSAlexey Brodkin #include <linux/etherdevice.h> 15e4f2379dSAlexey Brodkin #include <linux/interrupt.h> 16e4f2379dSAlexey Brodkin #include <linux/io.h> 17e4f2379dSAlexey Brodkin #include <linux/module.h> 18e4f2379dSAlexey Brodkin #include <linux/of_address.h> 19e4f2379dSAlexey Brodkin #include <linux/of_irq.h> 20e4f2379dSAlexey Brodkin #include <linux/of_mdio.h> 21e4f2379dSAlexey Brodkin #include <linux/of_net.h> 22e4f2379dSAlexey Brodkin #include <linux/of_platform.h> 23e4f2379dSAlexey Brodkin 24e4f2379dSAlexey Brodkin #include "emac.h" 25e4f2379dSAlexey Brodkin 2678aa0975SAlexander Kochetkov static void arc_emac_restart(struct net_device *ndev); 2778aa0975SAlexander Kochetkov 28e4f2379dSAlexey Brodkin /** 2974dd40bcSBeniamino Galvani * arc_emac_tx_avail - Return the number of available slots in the tx ring. 3074dd40bcSBeniamino Galvani * @priv: Pointer to ARC EMAC private data structure. 3174dd40bcSBeniamino Galvani * 3274dd40bcSBeniamino Galvani * returns: the number of slots available for transmission in tx the ring. 
 */
static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
{
	/* One slot is deliberately kept unused so that
	 * txbd_curr == txbd_dirty unambiguously means "empty ring"
	 * rather than "full ring".
	 */
	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
}

/**
 * arc_emac_adjust_link - Adjust the PHY link duplex.
 * @ndev: Pointer to the net_device structure.
 *
 * This function is called to change the duplex setting after auto negotiation
 * is done by the PHY.  Mirrors the PHY's link/speed/duplex state into
 * @priv and updates the MAC's R_CTRL full-duplex bit accordingly.
 */
static void arc_emac_adjust_link(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	unsigned int reg, state_changed = 0;

	if (priv->link != phy_dev->link) {
		priv->link = phy_dev->link;
		state_changed = 1;
	}

	if (priv->speed != phy_dev->speed) {
		priv->speed = phy_dev->speed;
		state_changed = 1;
		/* Platform glue hook (if provided) retunes the MAC clock
		 * for the new line speed.
		 */
		if (priv->set_mac_speed)
			priv->set_mac_speed(priv, priv->speed);
	}

	if (priv->duplex != phy_dev->duplex) {
		/* Read-modify-write of R_CTRL: only the ENFL bit changes */
		reg = arc_reg_get(priv, R_CTRL);

		if (phy_dev->duplex == DUPLEX_FULL)
			reg |= ENFL_MASK;
		else
			reg &= ~ENFL_MASK;

		arc_reg_set(priv, R_CTRL, reg);
		priv->duplex = phy_dev->duplex;
		state_changed = 1;
	}

	/* Log the new link state only when something actually changed */
	if (state_changed)
		phy_print_status(phy_dev);
}

/**
 * arc_emac_get_drvinfo - Get EMAC driver information.
 * @ndev: Pointer to net_device structure.
 * @info: Pointer to ethtool_drvinfo structure.
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void arc_emac_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	/* Name/version strings are supplied by the platform variant */
	strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
	strlcpy(info->version, priv->drv_version, sizeof(info->version));
}

static const struct ethtool_ops arc_emac_ethtool_ops = {
	.get_drvinfo	= arc_emac_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

/* A BD carrying either the first or the last fragment of a frame */
#define FIRST_OR_LAST_MASK	(FIRST_MASK | LAST_MASK)

/**
 * arc_emac_tx_clean - clears processed by EMAC Tx BDs.
 * @ndev: Pointer to the network device.
 *
 * Walks the Tx ring from txbd_dirty, reclaiming every BD the hardware has
 * finished with: accounts stats, unmaps the DMA buffer, frees the skb and
 * resets the descriptor.  Wakes the queue if slots became available.
 */
static void arc_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		/* Stop at the first BD still owned by the EMAC, or one
		 * that was never submitted (no data pointer / no skb).
		 */
		if ((info & FOR_EMAC) || !txbd->data || !skb)
			break;

		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
			/* Hardware reported a transmit problem for this BD */
			stats->tx_errors++;
			stats->tx_dropped++;

			if (info & DEFR)
				stats->tx_carrier_errors++;

			if (info & LTCL)
				stats->collisions++;

			if (info & UFLO)
				stats->tx_fifo_errors++;
		} else if (likely(info & FIRST_OR_LAST_MASK)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
		}

		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* return the sk_buff to system */
		dev_consume_skb_irq(skb);

		/* Reset the descriptor so the ownership test above treats
		 * it as unused on the next pass.
		 */
		txbd->data = 0;
		txbd->info = 0;
		tx_buff->skb = NULL;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
	}

	/* Ensure that txbd_dirty is visible to tx() before checking
	 * for queue stopped.
	 */
	smp_mb();

	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
		netif_wake_queue(ndev);
}

/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev: Pointer to the network device.
 * @budget: How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		/* BD still owned by the EMAC: no more completed packets */
		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for next cycle. netif_receive_skb()
		 * only if new skb was allocated and mapped to avoid holes
		 * in the RX fifo.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");
			/* Return ownership to EMAC: the old buffer is
			 * reused and the received frame is dropped.
			 */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* unmap previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		/* Hand the filled buffer to the stack */
		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		/* Install the fresh replacement buffer in the ring slot */
		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}

/**
 * arc_emac_rx_miss_handle - handle R_MISS register
 * @ndev: Pointer to the net_device structure.
27278aa0975SAlexander Kochetkov */ 27378aa0975SAlexander Kochetkov static void arc_emac_rx_miss_handle(struct net_device *ndev) 27478aa0975SAlexander Kochetkov { 27578aa0975SAlexander Kochetkov struct arc_emac_priv *priv = netdev_priv(ndev); 27678aa0975SAlexander Kochetkov struct net_device_stats *stats = &ndev->stats; 27778aa0975SAlexander Kochetkov unsigned int miss; 27878aa0975SAlexander Kochetkov 27978aa0975SAlexander Kochetkov miss = arc_reg_get(priv, R_MISS); 28078aa0975SAlexander Kochetkov if (miss) { 28178aa0975SAlexander Kochetkov stats->rx_errors += miss; 28278aa0975SAlexander Kochetkov stats->rx_missed_errors += miss; 28378aa0975SAlexander Kochetkov priv->rx_missed_errors += miss; 28478aa0975SAlexander Kochetkov } 28578aa0975SAlexander Kochetkov } 28678aa0975SAlexander Kochetkov 28778aa0975SAlexander Kochetkov /** 28878aa0975SAlexander Kochetkov * arc_emac_rx_stall_check - check RX stall 28978aa0975SAlexander Kochetkov * @ndev: Pointer to the net_device structure. 29078aa0975SAlexander Kochetkov * @budget: How many BDs requested to process on 1 call. 29178aa0975SAlexander Kochetkov * @work_done: How many BDs processed 29278aa0975SAlexander Kochetkov * 29378aa0975SAlexander Kochetkov * Under certain conditions EMAC stop reception of incoming packets and 29478aa0975SAlexander Kochetkov * continuously increment R_MISS register instead of saving data into 29578aa0975SAlexander Kochetkov * provided buffer. This function detect that condition and restart 29678aa0975SAlexander Kochetkov * EMAC. 
29778aa0975SAlexander Kochetkov */ 29878aa0975SAlexander Kochetkov static void arc_emac_rx_stall_check(struct net_device *ndev, 29978aa0975SAlexander Kochetkov int budget, unsigned int work_done) 30078aa0975SAlexander Kochetkov { 30178aa0975SAlexander Kochetkov struct arc_emac_priv *priv = netdev_priv(ndev); 30278aa0975SAlexander Kochetkov struct arc_emac_bd *rxbd; 30378aa0975SAlexander Kochetkov 30478aa0975SAlexander Kochetkov if (work_done) 30578aa0975SAlexander Kochetkov priv->rx_missed_errors = 0; 30678aa0975SAlexander Kochetkov 30778aa0975SAlexander Kochetkov if (priv->rx_missed_errors && budget) { 30878aa0975SAlexander Kochetkov rxbd = &priv->rxbd[priv->last_rx_bd]; 30978aa0975SAlexander Kochetkov if (le32_to_cpu(rxbd->info) & FOR_EMAC) { 31078aa0975SAlexander Kochetkov arc_emac_restart(ndev); 31178aa0975SAlexander Kochetkov priv->rx_missed_errors = 0; 31278aa0975SAlexander Kochetkov } 31378aa0975SAlexander Kochetkov } 31478aa0975SAlexander Kochetkov } 31578aa0975SAlexander Kochetkov 31678aa0975SAlexander Kochetkov /** 317e4f2379dSAlexey Brodkin * arc_emac_poll - NAPI poll handler. 318e4f2379dSAlexey Brodkin * @napi: Pointer to napi_struct structure. 319e4f2379dSAlexey Brodkin * @budget: How many BDs to process on 1 call. 
 *
 * returns: Number of processed BDs
 */
static int arc_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	/* Reclaim completed Tx BDs and sample the miss counter first */
	arc_emac_tx_clean(ndev);
	arc_emac_rx_miss_handle(ndev);

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* Budget not exhausted: leave polling mode and re-arm
		 * RX/TX interrupts (masked in arc_emac_intr()).
		 */
		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
	}

	arc_emac_rx_stall_check(ndev, budget, work_done);

	return work_done;
}

/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * ARC EMAC has only 1 interrupt line, and depending on bits raised in
 * STATUS register we may tell what is a reason for interrupt to fire.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* Mask RX/TX interrupts; NAPI poll re-enables them */
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
			priv->rx_missed_errors += 0x100;
			/* Kick NAPI so arc_emac_rx_stall_check() can run */
			napi_schedule(&priv->napi);
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the ISR with the IRQ line masked */
static void arc_emac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	arc_emac_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * arc_emac_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
418e4f2379dSAlexey Brodkin * 419e4f2379dSAlexey Brodkin * This function sets the MAC address, requests and enables an IRQ 420e4f2379dSAlexey Brodkin * for the EMAC device and starts the Tx queue. 421e4f2379dSAlexey Brodkin * It also connects to the phy device. 422e4f2379dSAlexey Brodkin */ 423e4f2379dSAlexey Brodkin static int arc_emac_open(struct net_device *ndev) 424e4f2379dSAlexey Brodkin { 425e4f2379dSAlexey Brodkin struct arc_emac_priv *priv = netdev_priv(ndev); 42601dea536SPhilippe Reynes struct phy_device *phy_dev = ndev->phydev; 427e4f2379dSAlexey Brodkin int i; 428e4f2379dSAlexey Brodkin 429e4f2379dSAlexey Brodkin phy_dev->autoneg = AUTONEG_ENABLE; 430e4f2379dSAlexey Brodkin phy_dev->speed = 0; 431e4f2379dSAlexey Brodkin phy_dev->duplex = 0; 4323c1bcc86SAndrew Lunn linkmode_and(phy_dev->advertising, phy_dev->advertising, 4333c1bcc86SAndrew Lunn phy_dev->supported); 434e4f2379dSAlexey Brodkin 435a4a1139bSAlexey Brodkin priv->last_rx_bd = 0; 436a4a1139bSAlexey Brodkin 437e4f2379dSAlexey Brodkin /* Allocate and set buffers for Rx BD's */ 438e4f2379dSAlexey Brodkin for (i = 0; i < RX_BD_NUM; i++) { 439a4a1139bSAlexey Brodkin dma_addr_t addr; 440a4a1139bSAlexey Brodkin unsigned int *last_rx_bd = &priv->last_rx_bd; 441a4a1139bSAlexey Brodkin struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; 442a4a1139bSAlexey Brodkin struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; 443a4a1139bSAlexey Brodkin 444a4a1139bSAlexey Brodkin rx_buff->skb = netdev_alloc_skb_ip_align(ndev, 445a4a1139bSAlexey Brodkin EMAC_BUFFER_SIZE); 446a4a1139bSAlexey Brodkin if (unlikely(!rx_buff->skb)) 447e4f2379dSAlexey Brodkin return -ENOMEM; 448e4f2379dSAlexey Brodkin 449a4a1139bSAlexey Brodkin addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, 450a4a1139bSAlexey Brodkin EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); 451a4a1139bSAlexey Brodkin if (dma_mapping_error(&ndev->dev, addr)) { 452a4a1139bSAlexey Brodkin netdev_err(ndev, "cannot dma map\n"); 453a4a1139bSAlexey Brodkin 
dev_kfree_skb(rx_buff->skb); 454a4a1139bSAlexey Brodkin return -ENOMEM; 455a4a1139bSAlexey Brodkin } 456a4a1139bSAlexey Brodkin dma_unmap_addr_set(rx_buff, addr, addr); 457a4a1139bSAlexey Brodkin dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); 458a4a1139bSAlexey Brodkin 459a4a1139bSAlexey Brodkin rxbd->data = cpu_to_le32(addr); 460e4f2379dSAlexey Brodkin 461e4f2379dSAlexey Brodkin /* Make sure pointer to data buffer is set */ 462e4f2379dSAlexey Brodkin wmb(); 463e4f2379dSAlexey Brodkin 464a4a1139bSAlexey Brodkin /* Return ownership to EMAC */ 465a4a1139bSAlexey Brodkin rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); 466e4f2379dSAlexey Brodkin 467a4a1139bSAlexey Brodkin *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; 468a4a1139bSAlexey Brodkin } 469e4f2379dSAlexey Brodkin 47099f93a15SAlexander Kochetkov priv->txbd_curr = 0; 47199f93a15SAlexander Kochetkov priv->txbd_dirty = 0; 47299f93a15SAlexander Kochetkov 473e4f2379dSAlexey Brodkin /* Clean Tx BD's */ 474e4f2379dSAlexey Brodkin memset(priv->txbd, 0, TX_RING_SZ); 475e4f2379dSAlexey Brodkin 476e4f2379dSAlexey Brodkin /* Initialize logical address filter */ 477e4f2379dSAlexey Brodkin arc_reg_set(priv, R_LAFL, 0); 478e4f2379dSAlexey Brodkin arc_reg_set(priv, R_LAFH, 0); 479e4f2379dSAlexey Brodkin 480e4f2379dSAlexey Brodkin /* Set BD ring pointers for device side */ 481e4f2379dSAlexey Brodkin arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma); 482e4f2379dSAlexey Brodkin arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); 483e4f2379dSAlexey Brodkin 484e4f2379dSAlexey Brodkin /* Enable interrupts */ 4857ce7679dSBeniamino Galvani arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); 486e4f2379dSAlexey Brodkin 487e4f2379dSAlexey Brodkin /* Set CONTROL */ 488e4f2379dSAlexey Brodkin arc_reg_set(priv, R_CTRL, 489e4f2379dSAlexey Brodkin (RX_BD_NUM << 24) | /* RX BD table length */ 490e4f2379dSAlexey Brodkin (TX_BD_NUM << 16) | /* TX BD table length */ 491e4f2379dSAlexey Brodkin TXRN_MASK | 
RXRN_MASK); 492e4f2379dSAlexey Brodkin 493e4f2379dSAlexey Brodkin napi_enable(&priv->napi); 494e4f2379dSAlexey Brodkin 495e4f2379dSAlexey Brodkin /* Enable EMAC */ 496e4f2379dSAlexey Brodkin arc_reg_or(priv, R_CTRL, EN_MASK); 497e4f2379dSAlexey Brodkin 498b18b7453SAlexander Kochetkov phy_start(ndev->phydev); 499e4f2379dSAlexey Brodkin 500e4f2379dSAlexey Brodkin netif_start_queue(ndev); 501e4f2379dSAlexey Brodkin 502e4f2379dSAlexey Brodkin return 0; 503e4f2379dSAlexey Brodkin } 504e4f2379dSAlexey Brodkin 505e4f2379dSAlexey Brodkin /** 506775dd682SBeniamino Galvani * arc_emac_set_rx_mode - Change the receive filtering mode. 507775dd682SBeniamino Galvani * @ndev: Pointer to the network device. 508775dd682SBeniamino Galvani * 509775dd682SBeniamino Galvani * This function enables/disables promiscuous or all-multicast mode 510775dd682SBeniamino Galvani * and updates the multicast filtering list of the network device. 511775dd682SBeniamino Galvani */ 512775dd682SBeniamino Galvani static void arc_emac_set_rx_mode(struct net_device *ndev) 513775dd682SBeniamino Galvani { 514775dd682SBeniamino Galvani struct arc_emac_priv *priv = netdev_priv(ndev); 515775dd682SBeniamino Galvani 516775dd682SBeniamino Galvani if (ndev->flags & IFF_PROMISC) { 517775dd682SBeniamino Galvani arc_reg_or(priv, R_CTRL, PROM_MASK); 518775dd682SBeniamino Galvani } else { 519775dd682SBeniamino Galvani arc_reg_clr(priv, R_CTRL, PROM_MASK); 520775dd682SBeniamino Galvani 521775dd682SBeniamino Galvani if (ndev->flags & IFF_ALLMULTI) { 522775dd682SBeniamino Galvani arc_reg_set(priv, R_LAFL, ~0); 523775dd682SBeniamino Galvani arc_reg_set(priv, R_LAFH, ~0); 524d0e3f65bSAlexander Kochetkov } else if (ndev->flags & IFF_MULTICAST) { 525775dd682SBeniamino Galvani struct netdev_hw_addr *ha; 526775dd682SBeniamino Galvani unsigned int filter[2] = { 0, 0 }; 527775dd682SBeniamino Galvani int bit; 528775dd682SBeniamino Galvani 529775dd682SBeniamino Galvani netdev_for_each_mc_addr(ha, ndev) { 530775dd682SBeniamino Galvani 
bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; 531775dd682SBeniamino Galvani filter[bit >> 5] |= 1 << (bit & 31); 532775dd682SBeniamino Galvani } 533775dd682SBeniamino Galvani 534775dd682SBeniamino Galvani arc_reg_set(priv, R_LAFL, filter[0]); 535775dd682SBeniamino Galvani arc_reg_set(priv, R_LAFH, filter[1]); 536d0e3f65bSAlexander Kochetkov } else { 537d0e3f65bSAlexander Kochetkov arc_reg_set(priv, R_LAFL, 0); 538d0e3f65bSAlexander Kochetkov arc_reg_set(priv, R_LAFH, 0); 539775dd682SBeniamino Galvani } 540775dd682SBeniamino Galvani } 541775dd682SBeniamino Galvani } 542775dd682SBeniamino Galvani 543775dd682SBeniamino Galvani /** 544b530b164SAlexander Kochetkov * arc_free_tx_queue - free skb from tx queue 545b530b164SAlexander Kochetkov * @ndev: Pointer to the network device. 546b530b164SAlexander Kochetkov * 547b530b164SAlexander Kochetkov * This function must be called while EMAC disable 548b530b164SAlexander Kochetkov */ 549b530b164SAlexander Kochetkov static void arc_free_tx_queue(struct net_device *ndev) 550b530b164SAlexander Kochetkov { 551b530b164SAlexander Kochetkov struct arc_emac_priv *priv = netdev_priv(ndev); 552b530b164SAlexander Kochetkov unsigned int i; 553b530b164SAlexander Kochetkov 554b530b164SAlexander Kochetkov for (i = 0; i < TX_BD_NUM; i++) { 555b530b164SAlexander Kochetkov struct arc_emac_bd *txbd = &priv->txbd[i]; 556b530b164SAlexander Kochetkov struct buffer_state *tx_buff = &priv->tx_buff[i]; 557b530b164SAlexander Kochetkov 558b530b164SAlexander Kochetkov if (tx_buff->skb) { 559663713ebSCaesar Wang dma_unmap_single(&ndev->dev, 560663713ebSCaesar Wang dma_unmap_addr(tx_buff, addr), 561663713ebSCaesar Wang dma_unmap_len(tx_buff, len), 562663713ebSCaesar Wang DMA_TO_DEVICE); 563b530b164SAlexander Kochetkov 564b530b164SAlexander Kochetkov /* return the sk_buff to system */ 565b530b164SAlexander Kochetkov dev_kfree_skb_irq(tx_buff->skb); 566b530b164SAlexander Kochetkov } 567b530b164SAlexander Kochetkov 568b530b164SAlexander Kochetkov txbd->info = 
0; 569b530b164SAlexander Kochetkov txbd->data = 0; 570b530b164SAlexander Kochetkov tx_buff->skb = NULL; 571b530b164SAlexander Kochetkov } 572b530b164SAlexander Kochetkov } 573b530b164SAlexander Kochetkov 574b530b164SAlexander Kochetkov /** 575b530b164SAlexander Kochetkov * arc_free_rx_queue - free skb from rx queue 576b530b164SAlexander Kochetkov * @ndev: Pointer to the network device. 577b530b164SAlexander Kochetkov * 578b530b164SAlexander Kochetkov * This function must be called while EMAC disable 579b530b164SAlexander Kochetkov */ 580b530b164SAlexander Kochetkov static void arc_free_rx_queue(struct net_device *ndev) 581b530b164SAlexander Kochetkov { 582b530b164SAlexander Kochetkov struct arc_emac_priv *priv = netdev_priv(ndev); 583b530b164SAlexander Kochetkov unsigned int i; 584b530b164SAlexander Kochetkov 585b530b164SAlexander Kochetkov for (i = 0; i < RX_BD_NUM; i++) { 586b530b164SAlexander Kochetkov struct arc_emac_bd *rxbd = &priv->rxbd[i]; 587b530b164SAlexander Kochetkov struct buffer_state *rx_buff = &priv->rx_buff[i]; 588b530b164SAlexander Kochetkov 589b530b164SAlexander Kochetkov if (rx_buff->skb) { 590663713ebSCaesar Wang dma_unmap_single(&ndev->dev, 591663713ebSCaesar Wang dma_unmap_addr(rx_buff, addr), 592663713ebSCaesar Wang dma_unmap_len(rx_buff, len), 593663713ebSCaesar Wang DMA_FROM_DEVICE); 594b530b164SAlexander Kochetkov 595b530b164SAlexander Kochetkov /* return the sk_buff to system */ 596b530b164SAlexander Kochetkov dev_kfree_skb_irq(rx_buff->skb); 597b530b164SAlexander Kochetkov } 598b530b164SAlexander Kochetkov 599b530b164SAlexander Kochetkov rxbd->info = 0; 600b530b164SAlexander Kochetkov rxbd->data = 0; 601b530b164SAlexander Kochetkov rx_buff->skb = NULL; 602b530b164SAlexander Kochetkov } 603b530b164SAlexander Kochetkov } 604b530b164SAlexander Kochetkov 605b530b164SAlexander Kochetkov /** 606e4f2379dSAlexey Brodkin * arc_emac_stop - Close the network device. 607e4f2379dSAlexey Brodkin * @ndev: Pointer to the network device. 
608e4f2379dSAlexey Brodkin *
609e4f2379dSAlexey Brodkin * This function stops NAPI and the Tx queue, stops the PHY, masks the EMAC
610e4f2379dSAlexey Brodkin * interrupts, disables the EMAC and frees the buffers still held in the
611e4f2379dSAlexey Brodkin * Rx/Tx rings. The IRQ is devm-managed and the PHY is disconnected in
* arc_emac_remove(), not here.
612e4f2379dSAlexey Brodkin */
613e4f2379dSAlexey Brodkin static int arc_emac_stop(struct net_device *ndev)
614e4f2379dSAlexey Brodkin {
615e4f2379dSAlexey Brodkin struct arc_emac_priv *priv = netdev_priv(ndev);
616e4f2379dSAlexey Brodkin 
617e4f2379dSAlexey Brodkin napi_disable(&priv->napi);
618e4f2379dSAlexey Brodkin netif_stop_queue(ndev);
619e4f2379dSAlexey Brodkin 
620b18b7453SAlexander Kochetkov phy_stop(ndev->phydev);
621b18b7453SAlexander Kochetkov 
622e4f2379dSAlexey Brodkin /* Disable interrupts */
6237ce7679dSBeniamino Galvani arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
624e4f2379dSAlexey Brodkin 
625e4f2379dSAlexey Brodkin /* Disable EMAC */
626e4f2379dSAlexey Brodkin arc_reg_clr(priv, R_CTRL, EN_MASK);
627e4f2379dSAlexey Brodkin 
628b530b164SAlexander Kochetkov /* Return the sk_buff to system */
629b530b164SAlexander Kochetkov arc_free_tx_queue(ndev);
630b530b164SAlexander Kochetkov arc_free_rx_queue(ndev);
631b530b164SAlexander Kochetkov 
632e4f2379dSAlexey Brodkin return 0;
633e4f2379dSAlexey Brodkin }
634e4f2379dSAlexey Brodkin 
635e4f2379dSAlexey Brodkin /**
636e4f2379dSAlexey Brodkin * arc_emac_stats - Get system network statistics.
637e4f2379dSAlexey Brodkin * @ndev: Pointer to net_device structure.
638e4f2379dSAlexey Brodkin *
639e4f2379dSAlexey Brodkin * Returns the address of the device statistics structure.
640e4f2379dSAlexey Brodkin * Statistics are updated in interrupt handler.
641e4f2379dSAlexey Brodkin */
642e4f2379dSAlexey Brodkin static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
643e4f2379dSAlexey Brodkin {
644e4f2379dSAlexey Brodkin struct arc_emac_priv *priv = netdev_priv(ndev);
645ff458f6fSTobias Klauser struct net_device_stats *stats = &ndev->stats;
646e4f2379dSAlexey Brodkin unsigned long miss, rxerr;
647e4f2379dSAlexey Brodkin u8 rxcrc, rxfram, rxoflow;
648e4f2379dSAlexey Brodkin 
649e4f2379dSAlexey Brodkin rxerr = arc_reg_get(priv, R_RXERR);
650e4f2379dSAlexey Brodkin miss = arc_reg_get(priv, R_MISS);
651e4f2379dSAlexey Brodkin 
/* The low three bytes of R_RXERR are extracted as separate counters:
 * bits 7:0 CRC errors, bits 15:8 framing errors, bits 23:16 overflows.
 */
652e4f2379dSAlexey Brodkin rxcrc = rxerr;
653e4f2379dSAlexey Brodkin rxfram = rxerr >> 8;
654e4f2379dSAlexey Brodkin rxoflow = rxerr >> 16;
655e4f2379dSAlexey Brodkin 
656e4f2379dSAlexey Brodkin stats->rx_errors += miss;
657e4f2379dSAlexey Brodkin stats->rx_errors += rxcrc + rxfram + rxoflow;
658e4f2379dSAlexey Brodkin 
659e4f2379dSAlexey Brodkin stats->rx_over_errors += rxoflow;
660e4f2379dSAlexey Brodkin stats->rx_frame_errors += rxfram;
661e4f2379dSAlexey Brodkin stats->rx_crc_errors += rxcrc;
662e4f2379dSAlexey Brodkin stats->rx_missed_errors += miss;
663e4f2379dSAlexey Brodkin 
664e4f2379dSAlexey Brodkin return stats;
665e4f2379dSAlexey Brodkin }
666e4f2379dSAlexey Brodkin 
667e4f2379dSAlexey Brodkin /**
668e4f2379dSAlexey Brodkin * arc_emac_tx - Starts the data transmission.
669e4f2379dSAlexey Brodkin * @skb: sk_buff pointer that contains data to be Transmitted.
670e4f2379dSAlexey Brodkin * @ndev: Pointer to net_device structure.
671e4f2379dSAlexey Brodkin *
672e4f2379dSAlexey Brodkin * returns: NETDEV_TX_OK, on success
673e4f2379dSAlexey Brodkin * NETDEV_TX_BUSY, if any of the descriptors are not free.
674e4f2379dSAlexey Brodkin *
675e4f2379dSAlexey Brodkin * This function is invoked from upper layers to initiate transmission.
676e4f2379dSAlexey Brodkin */
677e4f2379dSAlexey Brodkin static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
678e4f2379dSAlexey Brodkin {
679e4f2379dSAlexey Brodkin struct arc_emac_priv *priv = netdev_priv(ndev);
680e4f2379dSAlexey Brodkin unsigned int len, *txbd_curr = &priv->txbd_curr;
681ff458f6fSTobias Klauser struct net_device_stats *stats = &ndev->stats;
682e4f2379dSAlexey Brodkin __le32 *info = &priv->txbd[*txbd_curr].info;
683e4f2379dSAlexey Brodkin dma_addr_t addr;
684e4f2379dSAlexey Brodkin 
/* Pad short frames to the Ethernet minimum; skb is consumed on failure */
685e4f2379dSAlexey Brodkin if (skb_padto(skb, ETH_ZLEN))
686e4f2379dSAlexey Brodkin return NETDEV_TX_OK;
687e4f2379dSAlexey Brodkin 
688e4f2379dSAlexey Brodkin len = max_t(unsigned int, ETH_ZLEN, skb->len);
689e4f2379dSAlexey Brodkin 
690e4f2379dSAlexey Brodkin if (unlikely(!arc_emac_tx_avail(priv))) {
691e4f2379dSAlexey Brodkin netif_stop_queue(ndev);
69274dd40bcSBeniamino Galvani netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
693e4f2379dSAlexey Brodkin return NETDEV_TX_BUSY;
694e4f2379dSAlexey Brodkin }
695e4f2379dSAlexey Brodkin 
696e4f2379dSAlexey Brodkin addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
697e4f2379dSAlexey Brodkin DMA_TO_DEVICE);
698e4f2379dSAlexey Brodkin 
699e4f2379dSAlexey Brodkin if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
700e4f2379dSAlexey Brodkin stats->tx_dropped++;
701e4f2379dSAlexey Brodkin stats->tx_errors++;
7020f6e8761SWei Yongjun dev_kfree_skb_any(skb);
703e4f2379dSAlexey Brodkin return NETDEV_TX_OK;
704e4f2379dSAlexey Brodkin }
705a4a1139bSAlexey Brodkin dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
706e4f2379dSAlexey Brodkin dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
707e4f2379dSAlexey Brodkin 
708a4a1139bSAlexey Brodkin priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
709e4f2379dSAlexey Brodkin 
710e4f2379dSAlexey Brodkin /* Make sure pointer to data buffer is set */
711e4f2379dSAlexey Brodkin wmb();
712e4f2379dSAlexey Brodkin 
71337ec274eSEric Dumazet skb_tx_timestamp(skb);
71437ec274eSEric Dumazet 
/* FOR_EMAC hands ownership of this descriptor to the hardware */
715e4f2379dSAlexey Brodkin *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
716e4f2379dSAlexey Brodkin 
717c278c253SAlexander Kochetkov /* Make sure info word is set */
718c278c253SAlexander Kochetkov wmb();
719c278c253SAlexander Kochetkov 
720c278c253SAlexander Kochetkov priv->tx_buff[*txbd_curr].skb = skb;
721c278c253SAlexander Kochetkov 
722e4f2379dSAlexey Brodkin /* Increment index to point to the next BD */
723e4f2379dSAlexey Brodkin *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
724e4f2379dSAlexey Brodkin 
72574dd40bcSBeniamino Galvani /* Ensure that tx_clean() sees the new txbd_curr before
72674dd40bcSBeniamino Galvani * checking the queue status. This prevents an unneeded wake
72774dd40bcSBeniamino Galvani * of the queue in tx_clean().
72874dd40bcSBeniamino Galvani */
72974dd40bcSBeniamino Galvani smp_mb();
730e4f2379dSAlexey Brodkin 
73174dd40bcSBeniamino Galvani if (!arc_emac_tx_avail(priv)) {
732e4f2379dSAlexey Brodkin netif_stop_queue(ndev);
73374dd40bcSBeniamino Galvani /* Refresh tx_dirty */
73474dd40bcSBeniamino Galvani smp_mb();
73574dd40bcSBeniamino Galvani if (arc_emac_tx_avail(priv))
73674dd40bcSBeniamino Galvani netif_start_queue(ndev);
73774dd40bcSBeniamino Galvani }
738e4f2379dSAlexey Brodkin 
/* Kick the EMAC to poll the Tx ring */
739e4f2379dSAlexey Brodkin arc_reg_set(priv, R_STATUS, TXPL_MASK);
740e4f2379dSAlexey Brodkin 
741e4f2379dSAlexey Brodkin return NETDEV_TX_OK;
742e4f2379dSAlexey Brodkin }
743e4f2379dSAlexey Brodkin 
/**
 * arc_emac_set_address_internal - Program ndev->dev_addr into the
 * EMAC ADDRL/ADDRH registers.
 * @ndev: Pointer to net_device structure.
 */
744235a251aSMax Schwarz static void arc_emac_set_address_internal(struct net_device *ndev)
745235a251aSMax Schwarz {
746235a251aSMax Schwarz struct arc_emac_priv *priv = netdev_priv(ndev);
747235a251aSMax Schwarz unsigned int addr_low, addr_hi;
748235a251aSMax Schwarz 
749235a251aSMax Schwarz addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]);
750235a251aSMax Schwarz addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]);
751235a251aSMax Schwarz 
752235a251aSMax Schwarz arc_reg_set(priv, R_ADDRL, addr_low);
753235a251aSMax Schwarz arc_reg_set(priv, R_ADDRH, addr_hi);
754235a251aSMax Schwarz }
755235a251aSMax Schwarz 
756e4f2379dSAlexey Brodkin /**
757e4f2379dSAlexey Brodkin * arc_emac_set_address - Set the MAC address for this device.
758e4f2379dSAlexey Brodkin * @ndev: Pointer to net_device structure.
759e4f2379dSAlexey Brodkin * @p: 6 byte Address to be written as MAC address.
760e4f2379dSAlexey Brodkin *
761e4f2379dSAlexey Brodkin * This function copies the HW address from the sockaddr structure to the
762e4f2379dSAlexey Brodkin * net_device structure and updates the address in HW.
763e4f2379dSAlexey Brodkin *
764e4f2379dSAlexey Brodkin * returns: -EBUSY if the net device is busy or 0 if the address is set
765e4f2379dSAlexey Brodkin * successfully.
766e4f2379dSAlexey Brodkin */
767e4f2379dSAlexey Brodkin static int arc_emac_set_address(struct net_device *ndev, void *p)
768e4f2379dSAlexey Brodkin {
769e4f2379dSAlexey Brodkin struct sockaddr *addr = p;
770e4f2379dSAlexey Brodkin 
/* The address registers are only rewritten while the device is down */
771e4f2379dSAlexey Brodkin if (netif_running(ndev))
772e4f2379dSAlexey Brodkin return -EBUSY;
773e4f2379dSAlexey Brodkin 
774e4f2379dSAlexey Brodkin if (!is_valid_ether_addr(addr->sa_data))
775e4f2379dSAlexey Brodkin return -EADDRNOTAVAIL;
776e4f2379dSAlexey Brodkin 
777e4f2379dSAlexey Brodkin memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
778e4f2379dSAlexey Brodkin 
779235a251aSMax Schwarz arc_emac_set_address_internal(ndev);
780e4f2379dSAlexey Brodkin 
781e4f2379dSAlexey Brodkin return 0;
782e4f2379dSAlexey Brodkin }
783e4f2379dSAlexey Brodkin 
78478aa0975SAlexander Kochetkov /**
78578aa0975SAlexander Kochetkov * arc_emac_restart - Restart EMAC
78678aa0975SAlexander Kochetkov * @ndev: Pointer to net_device structure.
78778aa0975SAlexander Kochetkov *
78878aa0975SAlexander Kochetkov * This function does a hardware reset of the EMAC in order to restore
78978aa0975SAlexander Kochetkov * network packets reception.
79078aa0975SAlexander Kochetkov */
79178aa0975SAlexander Kochetkov static void arc_emac_restart(struct net_device *ndev)
79278aa0975SAlexander Kochetkov {
79378aa0975SAlexander Kochetkov struct arc_emac_priv *priv = netdev_priv(ndev);
79478aa0975SAlexander Kochetkov struct net_device_stats *stats = &ndev->stats;
79578aa0975SAlexander Kochetkov int i;
79678aa0975SAlexander Kochetkov 
79778aa0975SAlexander Kochetkov if (net_ratelimit())
79878aa0975SAlexander Kochetkov netdev_warn(ndev, "restarting stalled EMAC\n");
79978aa0975SAlexander Kochetkov 
80078aa0975SAlexander Kochetkov netif_stop_queue(ndev);
80178aa0975SAlexander Kochetkov 
80278aa0975SAlexander Kochetkov /* Disable interrupts */
80378aa0975SAlexander Kochetkov arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
80478aa0975SAlexander Kochetkov 
80578aa0975SAlexander Kochetkov /* Disable EMAC */
80678aa0975SAlexander Kochetkov arc_reg_clr(priv, R_CTRL, EN_MASK);
80778aa0975SAlexander Kochetkov 
80878aa0975SAlexander Kochetkov /* Return the sk_buff to system */
80978aa0975SAlexander Kochetkov arc_free_tx_queue(ndev);
81078aa0975SAlexander Kochetkov 
81178aa0975SAlexander Kochetkov /* Clean Tx BD's */
81278aa0975SAlexander Kochetkov priv->txbd_curr = 0;
81378aa0975SAlexander Kochetkov priv->txbd_dirty = 0;
81478aa0975SAlexander Kochetkov memset(priv->txbd, 0, TX_RING_SZ);
81578aa0975SAlexander Kochetkov 
/* Rx skbs are kept; each descriptor is re-armed in place by handing
 * ownership back to the EMAC. Descriptors still owned by the CPU are
 * counted as errors/drops.
 */
81678aa0975SAlexander Kochetkov for (i = 0; i < RX_BD_NUM; i++) {
81778aa0975SAlexander Kochetkov struct arc_emac_bd *rxbd = &priv->rxbd[i];
81878aa0975SAlexander Kochetkov unsigned int info = le32_to_cpu(rxbd->info);
81978aa0975SAlexander Kochetkov 
82078aa0975SAlexander Kochetkov if (!(info & FOR_EMAC)) {
82178aa0975SAlexander Kochetkov stats->rx_errors++;
82278aa0975SAlexander Kochetkov stats->rx_dropped++;
82378aa0975SAlexander Kochetkov }
82478aa0975SAlexander Kochetkov /* Return ownership to EMAC */
82578aa0975SAlexander Kochetkov rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
82678aa0975SAlexander Kochetkov }
82778aa0975SAlexander Kochetkov priv->last_rx_bd = 0;
82878aa0975SAlexander Kochetkov 
82978aa0975SAlexander Kochetkov /* Make sure info is visible to EMAC before enable */
83078aa0975SAlexander Kochetkov wmb();
83178aa0975SAlexander Kochetkov 
83278aa0975SAlexander Kochetkov /* Enable interrupts */
83378aa0975SAlexander Kochetkov arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
83478aa0975SAlexander Kochetkov 
83578aa0975SAlexander Kochetkov /* Enable EMAC */
83678aa0975SAlexander Kochetkov arc_reg_or(priv, R_CTRL, EN_MASK);
83778aa0975SAlexander Kochetkov 
83878aa0975SAlexander Kochetkov netif_start_queue(ndev);
83978aa0975SAlexander Kochetkov }
84078aa0975SAlexander Kochetkov 
841e4f2379dSAlexey Brodkin static const struct net_device_ops arc_emac_netdev_ops = {
842e4f2379dSAlexey Brodkin .ndo_open = arc_emac_open,
843e4f2379dSAlexey Brodkin .ndo_stop = arc_emac_stop,
844e4f2379dSAlexey Brodkin .ndo_start_xmit = arc_emac_tx,
845e4f2379dSAlexey Brodkin .ndo_set_mac_address = arc_emac_set_address,
846e4f2379dSAlexey Brodkin .ndo_get_stats = arc_emac_stats,
847775dd682SBeniamino Galvani .ndo_set_rx_mode = arc_emac_set_rx_mode,
848fd786fb1SHeiner Kallweit .ndo_do_ioctl = phy_do_ioctl_running,
8495a45e57aSBeniamino Galvani #ifdef CONFIG_NET_POLL_CONTROLLER
8505a45e57aSBeniamino Galvani .ndo_poll_controller = arc_emac_poll_controller,
8515a45e57aSBeniamino Galvani #endif
852e4f2379dSAlexey Brodkin };
853e4f2379dSAlexey Brodkin 
85423d2d9a6SRomain Perier int arc_emac_probe(struct net_device *ndev, int interface)
855e4f2379dSAlexey Brodkin {
85623d2d9a6SRomain Perier struct device *dev = ndev->dev.parent;
857f7578496SThierry Reding struct resource res_regs;
858e4f2379dSAlexey Brodkin struct device_node *phy_node;
85901dea536SPhilippe Reynes struct phy_device *phydev = NULL;
860e4f2379dSAlexey Brodkin struct arc_emac_priv *priv;
861e4f2379dSAlexey Brodkin const char *mac_addr;
862f7578496SThierry Reding unsigned int id,
clock_frequency, irq;
863e4f2379dSAlexey Brodkin int err;
864e4f2379dSAlexey Brodkin 
865e4f2379dSAlexey Brodkin /* Get PHY from device tree */
866f15f44e0SRomain Perier phy_node = of_parse_phandle(dev->of_node, "phy", 0);
867e4f2379dSAlexey Brodkin if (!phy_node) {
868f15f44e0SRomain Perier dev_err(dev, "failed to retrieve phy description from device tree\n");
869e4f2379dSAlexey Brodkin return -ENODEV;
870e4f2379dSAlexey Brodkin }
871e4f2379dSAlexey Brodkin 
872e4f2379dSAlexey Brodkin /* Get EMAC registers base address from device tree */
873f15f44e0SRomain Perier err = of_address_to_resource(dev->of_node, 0, &res_regs);
874e4f2379dSAlexey Brodkin if (err) {
875f15f44e0SRomain Perier dev_err(dev, "failed to retrieve registers base from device tree\n");
876a94efbd7SPeter Chen err = -ENODEV;
877a94efbd7SPeter Chen goto out_put_node;
878e4f2379dSAlexey Brodkin }
879e4f2379dSAlexey Brodkin 
880e4f2379dSAlexey Brodkin /* Get IRQ from device tree */
881f15f44e0SRomain Perier irq = irq_of_parse_and_map(dev->of_node, 0);
882f7578496SThierry Reding if (!irq) {
883f15f44e0SRomain Perier dev_err(dev, "failed to retrieve <irq> value from device tree\n");
884a94efbd7SPeter Chen err = -ENODEV;
885a94efbd7SPeter Chen goto out_put_node;
886e4f2379dSAlexey Brodkin }
887e4f2379dSAlexey Brodkin 
888e4f2379dSAlexey Brodkin ndev->netdev_ops = &arc_emac_netdev_ops;
889e4f2379dSAlexey Brodkin ndev->ethtool_ops = &arc_emac_ethtool_ops;
890e4f2379dSAlexey Brodkin ndev->watchdog_timeo = TX_TIMEOUT;
891e4f2379dSAlexey Brodkin 
892e4f2379dSAlexey Brodkin priv = netdev_priv(ndev);
893f15f44e0SRomain Perier priv->dev = dev;
894e4f2379dSAlexey Brodkin 
895f15f44e0SRomain Perier priv->regs = devm_ioremap_resource(dev, &res_regs);
89654447f1aSWei Yongjun if (IS_ERR(priv->regs)) {
89754447f1aSWei Yongjun err = PTR_ERR(priv->regs);
89854447f1aSWei Yongjun goto out_put_node;
89954447f1aSWei Yongjun }
900663713ebSCaesar Wang 
901f15f44e0SRomain Perier dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
902e4f2379dSAlexey Brodkin 
/* NOTE(review): priv->clk is presumably assigned by the platform glue
 * before arc_emac_probe() is called — confirm against the callers.
 */
90323d2d9a6SRomain Perier if (priv->clk) {
90423d2d9a6SRomain Perier err = clk_prepare_enable(priv->clk);
90523d2d9a6SRomain Perier if (err) {
90623d2d9a6SRomain Perier dev_err(dev, "failed to enable clock\n");
907a94efbd7SPeter Chen goto out_put_node;
90823d2d9a6SRomain Perier }
90923d2d9a6SRomain Perier 
91023d2d9a6SRomain Perier clock_frequency = clk_get_rate(priv->clk);
91123d2d9a6SRomain Perier } else {
91288154c96SHeiko Stübner /* Get CPU clock frequency from device tree */
913f15f44e0SRomain Perier if (of_property_read_u32(dev->of_node, "clock-frequency",
91488154c96SHeiko Stübner &clock_frequency)) {
915f15f44e0SRomain Perier dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
916a94efbd7SPeter Chen err = -EINVAL;
917a94efbd7SPeter Chen goto out_put_node;
91888154c96SHeiko Stübner }
91988154c96SHeiko Stübner }
92088154c96SHeiko Stübner 
921e4f2379dSAlexey Brodkin id = arc_reg_get(priv, R_ID);
922e4f2379dSAlexey Brodkin 
923e4f2379dSAlexey Brodkin /* Check for EMAC revision 5 or 7, magic number */
924e4f2379dSAlexey Brodkin if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
925f15f44e0SRomain Perier dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id);
926e4f2379dSAlexey Brodkin err = -ENODEV;
92788154c96SHeiko Stübner goto out_clken;
928e4f2379dSAlexey Brodkin }
929f15f44e0SRomain Perier dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id);
930e4f2379dSAlexey Brodkin 
931e4f2379dSAlexey Brodkin /* Set poll rate so that it polls every 1 ms */
932e4f2379dSAlexey Brodkin arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
933e4f2379dSAlexey Brodkin 
934f7578496SThierry Reding ndev->irq = irq;
935f15f44e0SRomain Perier dev_info(dev, "IRQ is %d\n", ndev->irq);
936e4f2379dSAlexey Brodkin 
937e4f2379dSAlexey Brodkin /* Register interrupt handler for device */
938f15f44e0SRomain Perier err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0,
939e4f2379dSAlexey Brodkin ndev->name, ndev);
940e4f2379dSAlexey Brodkin if (err) {
941f15f44e0SRomain Perier dev_err(dev, "could not allocate IRQ\n");
94288154c96SHeiko Stübner goto out_clken;
943e4f2379dSAlexey Brodkin }
944e4f2379dSAlexey Brodkin 
945e4f2379dSAlexey Brodkin /* Get MAC address from device tree */
946f15f44e0SRomain Perier mac_addr = of_get_mac_address(dev->of_node);
947e4f2379dSAlexey Brodkin 
/* Fall back to a random MAC when the device tree provides none */
948a51645f7SPetr Štetiar if (!IS_ERR(mac_addr))
9492d2924afSPetr Štetiar ether_addr_copy(ndev->dev_addr, mac_addr);
95099470819SLuka Perkov else
95199470819SLuka Perkov eth_hw_addr_random(ndev);
952e4f2379dSAlexey Brodkin 
953235a251aSMax Schwarz arc_emac_set_address_internal(ndev);
954f15f44e0SRomain Perier dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
955e4f2379dSAlexey Brodkin 
956e4f2379dSAlexey Brodkin /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
957f15f44e0SRomain Perier priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ,
958e4f2379dSAlexey Brodkin &priv->rxbd_dma, GFP_KERNEL);
959e4f2379dSAlexey Brodkin 
960e4f2379dSAlexey Brodkin if (!priv->rxbd) {
961f15f44e0SRomain Perier dev_err(dev, "failed to allocate data buffers\n");
962e4f2379dSAlexey Brodkin err = -ENOMEM;
96388154c96SHeiko Stübner goto out_clken;
964e4f2379dSAlexey Brodkin }
965e4f2379dSAlexey Brodkin 
966e4f2379dSAlexey Brodkin priv->txbd = priv->rxbd + RX_BD_NUM;
967e4f2379dSAlexey Brodkin 
968e4f2379dSAlexey Brodkin priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
969f15f44e0SRomain Perier dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
970e4f2379dSAlexey Brodkin (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);
971e4f2379dSAlexey Brodkin 
97293e91b3dSRomain Perier err = arc_mdio_probe(priv);
973e4f2379dSAlexey Brodkin if (err) {
974f15f44e0SRomain Perier dev_err(dev, "failed to probe MII bus\n");
97588154c96SHeiko Stübner goto out_clken;
976e4f2379dSAlexey Brodkin }
977e4f2379dSAlexey Brodkin 
97801dea536SPhilippe Reynes phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
97923d2d9a6SRomain Perier interface);
98001dea536SPhilippe Reynes if (!phydev) {
981f15f44e0SRomain Perier dev_err(dev, "of_phy_connect() failed\n");
982e4f2379dSAlexey Brodkin err = -ENODEV;
983796bec1eSHeiko Stübner goto out_mdio;
984e4f2379dSAlexey Brodkin }
985e4f2379dSAlexey Brodkin 
986f15f44e0SRomain Perier dev_info(dev, "connected to %s phy with id 0x%x\n",
98701dea536SPhilippe Reynes phydev->drv->name, phydev->phy_id);
988e4f2379dSAlexey Brodkin 
989e4f2379dSAlexey Brodkin netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
990e4f2379dSAlexey Brodkin 
991e4f2379dSAlexey Brodkin err = register_netdev(ndev);
992e4f2379dSAlexey Brodkin if (err) {
993f15f44e0SRomain Perier dev_err(dev, "failed to register network device\n");
994796bec1eSHeiko Stübner goto out_netif_api;
995e4f2379dSAlexey Brodkin }
996e4f2379dSAlexey Brodkin 
997a94efbd7SPeter Chen of_node_put(phy_node);
998e4f2379dSAlexey Brodkin return 0;
999e4f2379dSAlexey Brodkin 
/* Error unwinding: resources are released in reverse acquisition order */
1000796bec1eSHeiko Stübner out_netif_api:
1001796bec1eSHeiko Stübner netif_napi_del(&priv->napi);
100201dea536SPhilippe Reynes phy_disconnect(phydev);
1003796bec1eSHeiko Stübner out_mdio:
1004796bec1eSHeiko Stübner arc_mdio_remove(priv);
100588154c96SHeiko Stübner out_clken:
100623d2d9a6SRomain Perier if (priv->clk)
100788154c96SHeiko Stübner clk_disable_unprepare(priv->clk);
1008a94efbd7SPeter Chen out_put_node:
1009a94efbd7SPeter Chen of_node_put(phy_node);
1010a94efbd7SPeter Chen 
1011e4f2379dSAlexey Brodkin return err;
1012e4f2379dSAlexey Brodkin }
101323d2d9a6SRomain Perier EXPORT_SYMBOL_GPL(arc_emac_probe);
1014e4f2379dSAlexey Brodkin 
101523d2d9a6SRomain Perier int arc_emac_remove(struct net_device *ndev)
1016e4f2379dSAlexey Brodkin {
1017e4f2379dSAlexey Brodkin struct arc_emac_priv *priv = netdev_priv(ndev);
1018e4f2379dSAlexey Brodkin 
101901dea536SPhilippe Reynes phy_disconnect(ndev->phydev);
1020e4f2379dSAlexey Brodkin arc_mdio_remove(priv);
1021e4f2379dSAlexey Brodkin unregister_netdev(ndev);
1022e4f2379dSAlexey Brodkin netif_napi_del(&priv->napi);
102388154c96SHeiko Stübner 
/* NOTE(review): probe's error path guards the clock with "if (priv->clk)"
 * while this uses "!IS_ERR(priv->clk)" — confirm priv->clk can never be
 * NULL vs ERR_PTR in a way that makes these two checks disagree.
 */
1024663713ebSCaesar Wang if (!IS_ERR(priv->clk))
102588154c96SHeiko Stübner clk_disable_unprepare(priv->clk);
1026e4f2379dSAlexey Brodkin 
1027e4f2379dSAlexey Brodkin return 0;
1028e4f2379dSAlexey Brodkin }
102923d2d9a6SRomain Perier EXPORT_SYMBOL_GPL(arc_emac_remove);
1030e4f2379dSAlexey Brodkin 
1031e4f2379dSAlexey Brodkin MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
1032e4f2379dSAlexey Brodkin MODULE_DESCRIPTION("ARC EMAC driver");
1033e4f2379dSAlexey Brodkin MODULE_LICENSE("GPL");
1034