1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2e4f2379dSAlexey Brodkin /* 3e4f2379dSAlexey Brodkin * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) 4e4f2379dSAlexey Brodkin * 5e4f2379dSAlexey Brodkin * Driver for the ARC EMAC 10100 (hardware revision 5) 6e4f2379dSAlexey Brodkin * 7e4f2379dSAlexey Brodkin * Contributors: 8e4f2379dSAlexey Brodkin * Amit Bhor 9e4f2379dSAlexey Brodkin * Sameer Dhavale 10e4f2379dSAlexey Brodkin * Vineet Gupta 11e4f2379dSAlexey Brodkin */ 12e4f2379dSAlexey Brodkin 13775dd682SBeniamino Galvani #include <linux/crc32.h> 14e4f2379dSAlexey Brodkin #include <linux/etherdevice.h> 15e4f2379dSAlexey Brodkin #include <linux/interrupt.h> 16e4f2379dSAlexey Brodkin #include <linux/io.h> 17e4f2379dSAlexey Brodkin #include <linux/module.h> 18e4f2379dSAlexey Brodkin #include <linux/of_address.h> 19e4f2379dSAlexey Brodkin #include <linux/of_irq.h> 20e4f2379dSAlexey Brodkin #include <linux/of_mdio.h> 21e4f2379dSAlexey Brodkin #include <linux/of_net.h> 22e4f2379dSAlexey Brodkin #include <linux/of_platform.h> 23e4f2379dSAlexey Brodkin 24e4f2379dSAlexey Brodkin #include "emac.h" 25e4f2379dSAlexey Brodkin 2678aa0975SAlexander Kochetkov static void arc_emac_restart(struct net_device *ndev); 2778aa0975SAlexander Kochetkov 28e4f2379dSAlexey Brodkin /** 2974dd40bcSBeniamino Galvani * arc_emac_tx_avail - Return the number of available slots in the tx ring. 3074dd40bcSBeniamino Galvani * @priv: Pointer to ARC EMAC private data structure. 3174dd40bcSBeniamino Galvani * 3274dd40bcSBeniamino Galvani * returns: the number of slots available for transmission in tx the ring. 
 */
static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
{
	/* One BD is deliberately kept unused so that the ring never looks
	 * completely full: txbd_curr == txbd_dirty always means "empty".
	 */
	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
}

/**
 * arc_emac_adjust_link - Adjust the PHY link duplex.
 * @ndev: Pointer to the net_device structure.
 *
 * This function is called to change the duplex setting after auto negotiation
 * is done by the PHY.
 */
static void arc_emac_adjust_link(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	unsigned int reg, state_changed = 0;

	if (priv->link != phy_dev->link) {
		priv->link = phy_dev->link;
		state_changed = 1;
	}

	if (priv->speed != phy_dev->speed) {
		priv->speed = phy_dev->speed;
		state_changed = 1;
		/* Give the optional platform hook a chance to retune the
		 * MAC for the new line speed.
		 */
		if (priv->set_mac_speed)
			priv->set_mac_speed(priv, priv->speed);
	}

	if (priv->duplex != phy_dev->duplex) {
		reg = arc_reg_get(priv, R_CTRL);

		/* ENFL is the full-duplex enable flag in the CTRL register */
		if (phy_dev->duplex == DUPLEX_FULL)
			reg |= ENFL_MASK;
		else
			reg &= ~ENFL_MASK;

		arc_reg_set(priv, R_CTRL, reg);
		priv->duplex = phy_dev->duplex;
		state_changed = 1;
	}

	if (state_changed)
		phy_print_status(phy_dev);
}

/**
 * arc_emac_get_drvinfo - Get EMAC driver information.
 * @ndev: Pointer to net_device structure.
 * @info: Pointer to ethtool_drvinfo structure.
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void arc_emac_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
}

static const struct ethtool_ops arc_emac_ethtool_ops = {
	.get_drvinfo	= arc_emac_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

/* A packet that fits in a single BD carries both FIRST and LAST flags */
#define FIRST_OR_LAST_MASK	(FIRST_MASK | LAST_MASK)

/**
 * arc_emac_tx_clean - clears processed by EMAC Tx BDs.
 * @ndev: Pointer to the network device.
 *
 * Walks the Tx ring from txbd_dirty, reclaiming descriptors the EMAC has
 * finished with: accounts stats, unmaps the DMA buffer, frees the skb and
 * resets the BD. Stops at the first BD still owned by the EMAC.
 */
static void arc_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		/* Stop at a BD still owned by the EMAC or never submitted */
		if ((info & FOR_EMAC) || !txbd->data || !skb)
			break;

		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
			stats->tx_errors++;
			stats->tx_dropped++;

			if (info & DEFR)
				stats->tx_carrier_errors++;

			if (info & LTCL)
				stats->collisions++;

			if (info & UFLO)
				stats->tx_fifo_errors++;
		} else if (likely(info & FIRST_OR_LAST_MASK)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
		}

		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* return the sk_buff to system */
		dev_consume_skb_irq(skb);

		txbd->data = 0;
		txbd->info = 0;
		tx_buff->skb = NULL;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
	}

	/* Ensure that txbd_dirty is visible to tx() before checking
	 * for queue stopped.
	 */
	smp_mb();

	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
		netif_wake_queue(ndev);
}

/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev: Pointer to the network device.
 * @budget: How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packages to upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		/* BD still owned by the EMAC: no more completed packets */
		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for next cycle. netif_receive_skb()
		 * only if new skb was allocated and mapped to avoid holes
		 * in the RX fifo.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* unmap previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		/* Install the freshly mapped replacement buffer in the BD */
		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}

/**
 * arc_emac_rx_miss_handle - handle R_MISS register
 * @ndev: Pointer to the net_device structure.
27178aa0975SAlexander Kochetkov */ 27278aa0975SAlexander Kochetkov static void arc_emac_rx_miss_handle(struct net_device *ndev) 27378aa0975SAlexander Kochetkov { 27478aa0975SAlexander Kochetkov struct arc_emac_priv *priv = netdev_priv(ndev); 27578aa0975SAlexander Kochetkov struct net_device_stats *stats = &ndev->stats; 27678aa0975SAlexander Kochetkov unsigned int miss; 27778aa0975SAlexander Kochetkov 27878aa0975SAlexander Kochetkov miss = arc_reg_get(priv, R_MISS); 27978aa0975SAlexander Kochetkov if (miss) { 28078aa0975SAlexander Kochetkov stats->rx_errors += miss; 28178aa0975SAlexander Kochetkov stats->rx_missed_errors += miss; 28278aa0975SAlexander Kochetkov priv->rx_missed_errors += miss; 28378aa0975SAlexander Kochetkov } 28478aa0975SAlexander Kochetkov } 28578aa0975SAlexander Kochetkov 28678aa0975SAlexander Kochetkov /** 28778aa0975SAlexander Kochetkov * arc_emac_rx_stall_check - check RX stall 28878aa0975SAlexander Kochetkov * @ndev: Pointer to the net_device structure. 28978aa0975SAlexander Kochetkov * @budget: How many BDs requested to process on 1 call. 29078aa0975SAlexander Kochetkov * @work_done: How many BDs processed 29178aa0975SAlexander Kochetkov * 29278aa0975SAlexander Kochetkov * Under certain conditions EMAC stop reception of incoming packets and 29378aa0975SAlexander Kochetkov * continuously increment R_MISS register instead of saving data into 29478aa0975SAlexander Kochetkov * provided buffer. This function detect that condition and restart 29578aa0975SAlexander Kochetkov * EMAC. 
29678aa0975SAlexander Kochetkov */ 29778aa0975SAlexander Kochetkov static void arc_emac_rx_stall_check(struct net_device *ndev, 29878aa0975SAlexander Kochetkov int budget, unsigned int work_done) 29978aa0975SAlexander Kochetkov { 30078aa0975SAlexander Kochetkov struct arc_emac_priv *priv = netdev_priv(ndev); 30178aa0975SAlexander Kochetkov struct arc_emac_bd *rxbd; 30278aa0975SAlexander Kochetkov 30378aa0975SAlexander Kochetkov if (work_done) 30478aa0975SAlexander Kochetkov priv->rx_missed_errors = 0; 30578aa0975SAlexander Kochetkov 30678aa0975SAlexander Kochetkov if (priv->rx_missed_errors && budget) { 30778aa0975SAlexander Kochetkov rxbd = &priv->rxbd[priv->last_rx_bd]; 30878aa0975SAlexander Kochetkov if (le32_to_cpu(rxbd->info) & FOR_EMAC) { 30978aa0975SAlexander Kochetkov arc_emac_restart(ndev); 31078aa0975SAlexander Kochetkov priv->rx_missed_errors = 0; 31178aa0975SAlexander Kochetkov } 31278aa0975SAlexander Kochetkov } 31378aa0975SAlexander Kochetkov } 31478aa0975SAlexander Kochetkov 31578aa0975SAlexander Kochetkov /** 316e4f2379dSAlexey Brodkin * arc_emac_poll - NAPI poll handler. 317e4f2379dSAlexey Brodkin * @napi: Pointer to napi_struct structure. 318e4f2379dSAlexey Brodkin * @budget: How many BDs to process on 1 call. 
 *
 * returns: Number of processed BDs
 */
static int arc_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	arc_emac_tx_clean(ndev);
	arc_emac_rx_miss_handle(ndev);

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		/* All pending work done: leave polling mode and re-enable
		 * the RX/TX interrupts masked by the IRQ handler.
		 */
		napi_complete_done(napi, work_done);
		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
	}

	arc_emac_rx_stall_check(ndev, budget, work_done);

	return work_done;
}

/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * ARC EMAC has only 1 interrupt line, and depending on bits raised in
 * STATUS register we may tell what is a reason for interrupt to fire.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		/* Mask RX/TX interrupts and defer the work to NAPI */
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
			priv->rx_missed_errors += 0x100;
			/* Kick NAPI so the RX stall check gets to run */
			napi_schedule(&priv->napi);
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the interrupt handler with the IRQ masked */
static void arc_emac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	arc_emac_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * arc_emac_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the EMAC device and starts the Tx queue.
 * It also connects to the phy device.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	linkmode_and(phy_dev->advertising, phy_dev->advertising,
		     phy_dev->supported);

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		    (RX_BD_NUM << 24) |	/* RX BD table length */
		    (TX_BD_NUM << 16) |	/* TX BD table length */
		    TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}

/**
 * arc_emac_set_rx_mode - Change the receive filtering mode.
 * @ndev: Pointer to the network device.
 *
 * This function enables/disables promiscuous or all-multicast mode
 * and updates the multicast filtering list of the network device.
 */
static void arc_emac_set_rx_mode(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (ndev->flags & IFF_PROMISC) {
		arc_reg_or(priv, R_CTRL, PROM_MASK);
	} else {
		arc_reg_clr(priv, R_CTRL, PROM_MASK);

		if (ndev->flags & IFF_ALLMULTI) {
			arc_reg_set(priv, R_LAFL, ~0);
			arc_reg_set(priv, R_LAFH, ~0);
		} else if (ndev->flags & IFF_MULTICAST) {
			struct netdev_hw_addr *ha;
			unsigned int filter[2] = { 0, 0 };
			int bit;

			/* Hash each multicast address into the 64-bit
			 * logical address filter (LAFL/LAFH).
			 */
			netdev_for_each_mc_addr(ha, ndev) {
				bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
				filter[bit >> 5] |= 1 << (bit & 31);
			}

			arc_reg_set(priv, R_LAFL, filter[0]);
			arc_reg_set(priv, R_LAFH, filter[1]);
		} else {
			arc_reg_set(priv, R_LAFL, 0);
			arc_reg_set(priv, R_LAFH, 0);
		}
	}
}

/**
 * arc_free_tx_queue - free skb from tx queue
 * @ndev: Pointer to the network device.
 *
 * This function must be called while the EMAC is disabled.
 */
static void arc_free_tx_queue(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		struct arc_emac_bd *txbd = &priv->txbd[i];
		struct buffer_state *tx_buff = &priv->tx_buff[i];

		if (tx_buff->skb) {
			dma_unmap_single(&ndev->dev,
					 dma_unmap_addr(tx_buff, addr),
					 dma_unmap_len(tx_buff, len),
					 DMA_TO_DEVICE);

			/* return the sk_buff to system */
			dev_kfree_skb_irq(tx_buff->skb);
		}

		txbd->info = 0;
		txbd->data = 0;
		tx_buff->skb = NULL;
	}
}

/**
 * arc_free_rx_queue - free skb from rx queue
 * @ndev: Pointer to the network device.
 *
 * This function must be called while the EMAC is disabled.
 */
static void arc_free_rx_queue(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < RX_BD_NUM; i++) {
		struct arc_emac_bd *rxbd = &priv->rxbd[i];
		struct buffer_state *rx_buff = &priv->rx_buff[i];

		if (rx_buff->skb) {
			dma_unmap_single(&ndev->dev,
					 dma_unmap_addr(rx_buff, addr),
					 dma_unmap_len(rx_buff, len),
					 DMA_FROM_DEVICE);

			/* return the sk_buff to system */
			dev_kfree_skb_irq(rx_buff->skb);
		}

		rxbd->info = 0;
		rxbd->data = 0;
		rx_buff->skb = NULL;
	}
}

/**
 * arc_emac_stop - Close the network device.
 * @ndev: Pointer to the network device.
 *
 * This function stops the Tx queue, disables interrupts and frees the IRQ for
 * the EMAC device.
 * It also disconnects the PHY device associated with the EMAC device.
 */
static int arc_emac_stop(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);

	phy_stop(ndev->phydev);

	/* Disable interrupts */
	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Disable EMAC */
	arc_reg_clr(priv, R_CTRL, EN_MASK);

	/* Return the sk_buff to system */
	arc_free_tx_queue(ndev);
	arc_free_rx_queue(ndev);

	return 0;
}

/**
 * arc_emac_stats - Get system network statistics.
 * @ndev: Pointer to net_device structure.
 *
 * Returns the address of the device statistics structure.
 * Statistics are updated in interrupt handler.
 */
static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long miss, rxerr;
	u8 rxcrc, rxfram, rxoflow;

	rxerr = arc_reg_get(priv, R_RXERR);
	miss = arc_reg_get(priv, R_MISS);

	/* R_RXERR packs three 8-bit counters: CRC errors in bits 7:0,
	 * frame errors in bits 15:8, overflow in bits 23:16 (u8 locals
	 * truncate each shifted value to its own byte).
	 */
	rxcrc = rxerr;
	rxfram = rxerr >> 8;
	rxoflow = rxerr >> 16;

	stats->rx_errors += miss;
	stats->rx_errors += rxcrc + rxfram + rxoflow;

	stats->rx_over_errors += rxoflow;
	stats->rx_frame_errors += rxfram;
	stats->rx_crc_errors += rxcrc;
	stats->rx_missed_errors += miss;

	return stats;
}

/**
 * arc_emac_tx - Starts the data transmission.
 * @skb: sk_buff pointer that contains data to be transmitted.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *	NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
675e4f2379dSAlexey Brodkin */ 676de37b0a5SLuc Van Oostenryck static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) 677e4f2379dSAlexey Brodkin { 678e4f2379dSAlexey Brodkin struct arc_emac_priv *priv = netdev_priv(ndev); 679e4f2379dSAlexey Brodkin unsigned int len, *txbd_curr = &priv->txbd_curr; 680ff458f6fSTobias Klauser struct net_device_stats *stats = &ndev->stats; 681e4f2379dSAlexey Brodkin __le32 *info = &priv->txbd[*txbd_curr].info; 682e4f2379dSAlexey Brodkin dma_addr_t addr; 683e4f2379dSAlexey Brodkin 684e4f2379dSAlexey Brodkin if (skb_padto(skb, ETH_ZLEN)) 685e4f2379dSAlexey Brodkin return NETDEV_TX_OK; 686e4f2379dSAlexey Brodkin 687e4f2379dSAlexey Brodkin len = max_t(unsigned int, ETH_ZLEN, skb->len); 688e4f2379dSAlexey Brodkin 68974dd40bcSBeniamino Galvani if (unlikely(!arc_emac_tx_avail(priv))) { 690e4f2379dSAlexey Brodkin netif_stop_queue(ndev); 69174dd40bcSBeniamino Galvani netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n"); 692e4f2379dSAlexey Brodkin return NETDEV_TX_BUSY; 693e4f2379dSAlexey Brodkin } 694e4f2379dSAlexey Brodkin 695e4f2379dSAlexey Brodkin addr = dma_map_single(&ndev->dev, (void *)skb->data, len, 696e4f2379dSAlexey Brodkin DMA_TO_DEVICE); 697e4f2379dSAlexey Brodkin 698e4f2379dSAlexey Brodkin if (unlikely(dma_mapping_error(&ndev->dev, addr))) { 699e4f2379dSAlexey Brodkin stats->tx_dropped++; 700e4f2379dSAlexey Brodkin stats->tx_errors++; 7010f6e8761SWei Yongjun dev_kfree_skb_any(skb); 702e4f2379dSAlexey Brodkin return NETDEV_TX_OK; 703e4f2379dSAlexey Brodkin } 704a4a1139bSAlexey Brodkin dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); 705e4f2379dSAlexey Brodkin dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); 706e4f2379dSAlexey Brodkin 707a4a1139bSAlexey Brodkin priv->txbd[*txbd_curr].data = cpu_to_le32(addr); 708e4f2379dSAlexey Brodkin 709e4f2379dSAlexey Brodkin /* Make sure pointer to data buffer is set */ 710e4f2379dSAlexey Brodkin wmb(); 711e4f2379dSAlexey Brodkin 71237ec274eSEric 
Dumazet skb_tx_timestamp(skb); 71337ec274eSEric Dumazet 714e4f2379dSAlexey Brodkin *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 715e4f2379dSAlexey Brodkin 716c278c253SAlexander Kochetkov /* Make sure info word is set */ 717c278c253SAlexander Kochetkov wmb(); 718c278c253SAlexander Kochetkov 719c278c253SAlexander Kochetkov priv->tx_buff[*txbd_curr].skb = skb; 720c278c253SAlexander Kochetkov 721e4f2379dSAlexey Brodkin /* Increment index to point to the next BD */ 722e4f2379dSAlexey Brodkin *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; 723e4f2379dSAlexey Brodkin 72474dd40bcSBeniamino Galvani /* Ensure that tx_clean() sees the new txbd_curr before 72574dd40bcSBeniamino Galvani * checking the queue status. This prevents an unneeded wake 72674dd40bcSBeniamino Galvani * of the queue in tx_clean(). 72774dd40bcSBeniamino Galvani */ 72874dd40bcSBeniamino Galvani smp_mb(); 729e4f2379dSAlexey Brodkin 73074dd40bcSBeniamino Galvani if (!arc_emac_tx_avail(priv)) { 731e4f2379dSAlexey Brodkin netif_stop_queue(ndev); 73274dd40bcSBeniamino Galvani /* Refresh tx_dirty */ 73374dd40bcSBeniamino Galvani smp_mb(); 73474dd40bcSBeniamino Galvani if (arc_emac_tx_avail(priv)) 73574dd40bcSBeniamino Galvani netif_start_queue(ndev); 73674dd40bcSBeniamino Galvani } 737e4f2379dSAlexey Brodkin 738e4f2379dSAlexey Brodkin arc_reg_set(priv, R_STATUS, TXPL_MASK); 739e4f2379dSAlexey Brodkin 740e4f2379dSAlexey Brodkin return NETDEV_TX_OK; 741e4f2379dSAlexey Brodkin } 742e4f2379dSAlexey Brodkin 743235a251aSMax Schwarz static void arc_emac_set_address_internal(struct net_device *ndev) 744235a251aSMax Schwarz { 745235a251aSMax Schwarz struct arc_emac_priv *priv = netdev_priv(ndev); 746235a251aSMax Schwarz unsigned int addr_low, addr_hi; 747235a251aSMax Schwarz 748235a251aSMax Schwarz addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]); 749235a251aSMax Schwarz addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]); 750235a251aSMax Schwarz 751235a251aSMax Schwarz arc_reg_set(priv, R_ADDRL, addr_low); 
752235a251aSMax Schwarz arc_reg_set(priv, R_ADDRH, addr_hi); 753235a251aSMax Schwarz } 754235a251aSMax Schwarz 755e4f2379dSAlexey Brodkin /** 756e4f2379dSAlexey Brodkin * arc_emac_set_address - Set the MAC address for this device. 757e4f2379dSAlexey Brodkin * @ndev: Pointer to net_device structure. 758e4f2379dSAlexey Brodkin * @p: 6 byte Address to be written as MAC address. 759e4f2379dSAlexey Brodkin * 760e4f2379dSAlexey Brodkin * This function copies the HW address from the sockaddr structure to the 761e4f2379dSAlexey Brodkin * net_device structure and updates the address in HW. 762e4f2379dSAlexey Brodkin * 763e4f2379dSAlexey Brodkin * returns: -EBUSY if the net device is busy or 0 if the address is set 764e4f2379dSAlexey Brodkin * successfully. 765e4f2379dSAlexey Brodkin */ 766e4f2379dSAlexey Brodkin static int arc_emac_set_address(struct net_device *ndev, void *p) 767e4f2379dSAlexey Brodkin { 768e4f2379dSAlexey Brodkin struct sockaddr *addr = p; 769e4f2379dSAlexey Brodkin 770e4f2379dSAlexey Brodkin if (netif_running(ndev)) 771e4f2379dSAlexey Brodkin return -EBUSY; 772e4f2379dSAlexey Brodkin 773e4f2379dSAlexey Brodkin if (!is_valid_ether_addr(addr->sa_data)) 774e4f2379dSAlexey Brodkin return -EADDRNOTAVAIL; 775e4f2379dSAlexey Brodkin 776e4f2379dSAlexey Brodkin memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 777e4f2379dSAlexey Brodkin 778235a251aSMax Schwarz arc_emac_set_address_internal(ndev); 779e4f2379dSAlexey Brodkin 780e4f2379dSAlexey Brodkin return 0; 781e4f2379dSAlexey Brodkin } 782e4f2379dSAlexey Brodkin 78378aa0975SAlexander Kochetkov /** 78478aa0975SAlexander Kochetkov * arc_emac_restart - Restart EMAC 78578aa0975SAlexander Kochetkov * @ndev: Pointer to net_device structure. 78678aa0975SAlexander Kochetkov * 78778aa0975SAlexander Kochetkov * This function do hardware reset of EMAC in order to restore 78878aa0975SAlexander Kochetkov * network packets reception. 
78978aa0975SAlexander Kochetkov */ 79078aa0975SAlexander Kochetkov static void arc_emac_restart(struct net_device *ndev) 79178aa0975SAlexander Kochetkov { 79278aa0975SAlexander Kochetkov struct arc_emac_priv *priv = netdev_priv(ndev); 79378aa0975SAlexander Kochetkov struct net_device_stats *stats = &ndev->stats; 79478aa0975SAlexander Kochetkov int i; 79578aa0975SAlexander Kochetkov 79678aa0975SAlexander Kochetkov if (net_ratelimit()) 79778aa0975SAlexander Kochetkov netdev_warn(ndev, "restarting stalled EMAC\n"); 79878aa0975SAlexander Kochetkov 79978aa0975SAlexander Kochetkov netif_stop_queue(ndev); 80078aa0975SAlexander Kochetkov 80178aa0975SAlexander Kochetkov /* Disable interrupts */ 80278aa0975SAlexander Kochetkov arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); 80378aa0975SAlexander Kochetkov 80478aa0975SAlexander Kochetkov /* Disable EMAC */ 80578aa0975SAlexander Kochetkov arc_reg_clr(priv, R_CTRL, EN_MASK); 80678aa0975SAlexander Kochetkov 80778aa0975SAlexander Kochetkov /* Return the sk_buff to system */ 80878aa0975SAlexander Kochetkov arc_free_tx_queue(ndev); 80978aa0975SAlexander Kochetkov 81078aa0975SAlexander Kochetkov /* Clean Tx BD's */ 81178aa0975SAlexander Kochetkov priv->txbd_curr = 0; 81278aa0975SAlexander Kochetkov priv->txbd_dirty = 0; 81378aa0975SAlexander Kochetkov memset(priv->txbd, 0, TX_RING_SZ); 81478aa0975SAlexander Kochetkov 81578aa0975SAlexander Kochetkov for (i = 0; i < RX_BD_NUM; i++) { 81678aa0975SAlexander Kochetkov struct arc_emac_bd *rxbd = &priv->rxbd[i]; 81778aa0975SAlexander Kochetkov unsigned int info = le32_to_cpu(rxbd->info); 81878aa0975SAlexander Kochetkov 81978aa0975SAlexander Kochetkov if (!(info & FOR_EMAC)) { 82078aa0975SAlexander Kochetkov stats->rx_errors++; 82178aa0975SAlexander Kochetkov stats->rx_dropped++; 82278aa0975SAlexander Kochetkov } 82378aa0975SAlexander Kochetkov /* Return ownership to EMAC */ 82478aa0975SAlexander Kochetkov rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); 
82578aa0975SAlexander Kochetkov } 82678aa0975SAlexander Kochetkov priv->last_rx_bd = 0; 82778aa0975SAlexander Kochetkov 82878aa0975SAlexander Kochetkov /* Make sure info is visible to EMAC before enable */ 82978aa0975SAlexander Kochetkov wmb(); 83078aa0975SAlexander Kochetkov 83178aa0975SAlexander Kochetkov /* Enable interrupts */ 83278aa0975SAlexander Kochetkov arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); 83378aa0975SAlexander Kochetkov 83478aa0975SAlexander Kochetkov /* Enable EMAC */ 83578aa0975SAlexander Kochetkov arc_reg_or(priv, R_CTRL, EN_MASK); 83678aa0975SAlexander Kochetkov 83778aa0975SAlexander Kochetkov netif_start_queue(ndev); 83878aa0975SAlexander Kochetkov } 83978aa0975SAlexander Kochetkov 840e4f2379dSAlexey Brodkin static const struct net_device_ops arc_emac_netdev_ops = { 841e4f2379dSAlexey Brodkin .ndo_open = arc_emac_open, 842e4f2379dSAlexey Brodkin .ndo_stop = arc_emac_stop, 843e4f2379dSAlexey Brodkin .ndo_start_xmit = arc_emac_tx, 844e4f2379dSAlexey Brodkin .ndo_set_mac_address = arc_emac_set_address, 845e4f2379dSAlexey Brodkin .ndo_get_stats = arc_emac_stats, 846775dd682SBeniamino Galvani .ndo_set_rx_mode = arc_emac_set_rx_mode, 847*a7605370SArnd Bergmann .ndo_eth_ioctl = phy_do_ioctl_running, 8485a45e57aSBeniamino Galvani #ifdef CONFIG_NET_POLL_CONTROLLER 8495a45e57aSBeniamino Galvani .ndo_poll_controller = arc_emac_poll_controller, 8505a45e57aSBeniamino Galvani #endif 851e4f2379dSAlexey Brodkin }; 852e4f2379dSAlexey Brodkin 85323d2d9a6SRomain Perier int arc_emac_probe(struct net_device *ndev, int interface) 854e4f2379dSAlexey Brodkin { 85523d2d9a6SRomain Perier struct device *dev = ndev->dev.parent; 856f7578496SThierry Reding struct resource res_regs; 857e4f2379dSAlexey Brodkin struct device_node *phy_node; 85801dea536SPhilippe Reynes struct phy_device *phydev = NULL; 859e4f2379dSAlexey Brodkin struct arc_emac_priv *priv; 860f7578496SThierry Reding unsigned int id, clock_frequency, irq; 861e4f2379dSAlexey Brodkin int err; 
862e4f2379dSAlexey Brodkin 863e4f2379dSAlexey Brodkin /* Get PHY from device tree */ 864f15f44e0SRomain Perier phy_node = of_parse_phandle(dev->of_node, "phy", 0); 865e4f2379dSAlexey Brodkin if (!phy_node) { 866f15f44e0SRomain Perier dev_err(dev, "failed to retrieve phy description from device tree\n"); 867e4f2379dSAlexey Brodkin return -ENODEV; 868e4f2379dSAlexey Brodkin } 869e4f2379dSAlexey Brodkin 870e4f2379dSAlexey Brodkin /* Get EMAC registers base address from device tree */ 871f15f44e0SRomain Perier err = of_address_to_resource(dev->of_node, 0, &res_regs); 872e4f2379dSAlexey Brodkin if (err) { 873f15f44e0SRomain Perier dev_err(dev, "failed to retrieve registers base from device tree\n"); 874a94efbd7SPeter Chen err = -ENODEV; 875a94efbd7SPeter Chen goto out_put_node; 876e4f2379dSAlexey Brodkin } 877e4f2379dSAlexey Brodkin 878e4f2379dSAlexey Brodkin /* Get IRQ from device tree */ 879f15f44e0SRomain Perier irq = irq_of_parse_and_map(dev->of_node, 0); 880f7578496SThierry Reding if (!irq) { 881f15f44e0SRomain Perier dev_err(dev, "failed to retrieve <irq> value from device tree\n"); 882a94efbd7SPeter Chen err = -ENODEV; 883a94efbd7SPeter Chen goto out_put_node; 884e4f2379dSAlexey Brodkin } 885e4f2379dSAlexey Brodkin 886e4f2379dSAlexey Brodkin ndev->netdev_ops = &arc_emac_netdev_ops; 887e4f2379dSAlexey Brodkin ndev->ethtool_ops = &arc_emac_ethtool_ops; 888e4f2379dSAlexey Brodkin ndev->watchdog_timeo = TX_TIMEOUT; 889e4f2379dSAlexey Brodkin 890e4f2379dSAlexey Brodkin priv = netdev_priv(ndev); 891f15f44e0SRomain Perier priv->dev = dev; 892e4f2379dSAlexey Brodkin 893f15f44e0SRomain Perier priv->regs = devm_ioremap_resource(dev, &res_regs); 89454447f1aSWei Yongjun if (IS_ERR(priv->regs)) { 89554447f1aSWei Yongjun err = PTR_ERR(priv->regs); 89654447f1aSWei Yongjun goto out_put_node; 89754447f1aSWei Yongjun } 898663713ebSCaesar Wang 899f15f44e0SRomain Perier dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); 900e4f2379dSAlexey Brodkin 90123d2d9a6SRomain Perier 
if (priv->clk) { 90223d2d9a6SRomain Perier err = clk_prepare_enable(priv->clk); 90323d2d9a6SRomain Perier if (err) { 90423d2d9a6SRomain Perier dev_err(dev, "failed to enable clock\n"); 905a94efbd7SPeter Chen goto out_put_node; 90623d2d9a6SRomain Perier } 90723d2d9a6SRomain Perier 90823d2d9a6SRomain Perier clock_frequency = clk_get_rate(priv->clk); 90923d2d9a6SRomain Perier } else { 91088154c96SHeiko Stübner /* Get CPU clock frequency from device tree */ 911f15f44e0SRomain Perier if (of_property_read_u32(dev->of_node, "clock-frequency", 91288154c96SHeiko Stübner &clock_frequency)) { 913f15f44e0SRomain Perier dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n"); 914a94efbd7SPeter Chen err = -EINVAL; 915a94efbd7SPeter Chen goto out_put_node; 91688154c96SHeiko Stübner } 91788154c96SHeiko Stübner } 91888154c96SHeiko Stübner 919e4f2379dSAlexey Brodkin id = arc_reg_get(priv, R_ID); 920e4f2379dSAlexey Brodkin 921e4f2379dSAlexey Brodkin /* Check for EMAC revision 5 or 7, magic number */ 922e4f2379dSAlexey Brodkin if (!(id == 0x0005fd02 || id == 0x0007fd02)) { 923f15f44e0SRomain Perier dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id); 924e4f2379dSAlexey Brodkin err = -ENODEV; 92588154c96SHeiko Stübner goto out_clken; 926e4f2379dSAlexey Brodkin } 927f15f44e0SRomain Perier dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id); 928e4f2379dSAlexey Brodkin 929e4f2379dSAlexey Brodkin /* Set poll rate so that it polls every 1 ms */ 930e4f2379dSAlexey Brodkin arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000); 931e4f2379dSAlexey Brodkin 932f7578496SThierry Reding ndev->irq = irq; 933f15f44e0SRomain Perier dev_info(dev, "IRQ is %d\n", ndev->irq); 934e4f2379dSAlexey Brodkin 935e4f2379dSAlexey Brodkin /* Register interrupt handler for device */ 936f15f44e0SRomain Perier err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0, 937e4f2379dSAlexey Brodkin ndev->name, ndev); 938e4f2379dSAlexey Brodkin if (err) { 939f15f44e0SRomain Perier dev_err(dev, "could 
not allocate IRQ\n"); 94088154c96SHeiko Stübner goto out_clken; 941e4f2379dSAlexey Brodkin } 942e4f2379dSAlexey Brodkin 943e4f2379dSAlexey Brodkin /* Get MAC address from device tree */ 94483216e39SMichael Walle err = of_get_mac_address(dev->of_node, ndev->dev_addr); 94583216e39SMichael Walle if (err) 94699470819SLuka Perkov eth_hw_addr_random(ndev); 947e4f2379dSAlexey Brodkin 948235a251aSMax Schwarz arc_emac_set_address_internal(ndev); 949f15f44e0SRomain Perier dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); 950e4f2379dSAlexey Brodkin 951e4f2379dSAlexey Brodkin /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ 952f15f44e0SRomain Perier priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ, 953e4f2379dSAlexey Brodkin &priv->rxbd_dma, GFP_KERNEL); 954e4f2379dSAlexey Brodkin 955e4f2379dSAlexey Brodkin if (!priv->rxbd) { 956f15f44e0SRomain Perier dev_err(dev, "failed to allocate data buffers\n"); 957e4f2379dSAlexey Brodkin err = -ENOMEM; 95888154c96SHeiko Stübner goto out_clken; 959e4f2379dSAlexey Brodkin } 960e4f2379dSAlexey Brodkin 961e4f2379dSAlexey Brodkin priv->txbd = priv->rxbd + RX_BD_NUM; 962e4f2379dSAlexey Brodkin 963e4f2379dSAlexey Brodkin priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ; 964f15f44e0SRomain Perier dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", 965e4f2379dSAlexey Brodkin (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma); 966e4f2379dSAlexey Brodkin 96793e91b3dSRomain Perier err = arc_mdio_probe(priv); 968e4f2379dSAlexey Brodkin if (err) { 969f15f44e0SRomain Perier dev_err(dev, "failed to probe MII bus\n"); 97088154c96SHeiko Stübner goto out_clken; 971e4f2379dSAlexey Brodkin } 972e4f2379dSAlexey Brodkin 97301dea536SPhilippe Reynes phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, 97423d2d9a6SRomain Perier interface); 97501dea536SPhilippe Reynes if (!phydev) { 976f15f44e0SRomain Perier dev_err(dev, "of_phy_connect() failed\n"); 977e4f2379dSAlexey Brodkin err = 
-ENODEV; 978796bec1eSHeiko Stübner goto out_mdio; 979e4f2379dSAlexey Brodkin } 980e4f2379dSAlexey Brodkin 981f15f44e0SRomain Perier dev_info(dev, "connected to %s phy with id 0x%x\n", 98201dea536SPhilippe Reynes phydev->drv->name, phydev->phy_id); 983e4f2379dSAlexey Brodkin 984e4f2379dSAlexey Brodkin netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); 985e4f2379dSAlexey Brodkin 986e4f2379dSAlexey Brodkin err = register_netdev(ndev); 987e4f2379dSAlexey Brodkin if (err) { 988f15f44e0SRomain Perier dev_err(dev, "failed to register network device\n"); 989796bec1eSHeiko Stübner goto out_netif_api; 990e4f2379dSAlexey Brodkin } 991e4f2379dSAlexey Brodkin 992a94efbd7SPeter Chen of_node_put(phy_node); 993e4f2379dSAlexey Brodkin return 0; 994e4f2379dSAlexey Brodkin 995796bec1eSHeiko Stübner out_netif_api: 996796bec1eSHeiko Stübner netif_napi_del(&priv->napi); 99701dea536SPhilippe Reynes phy_disconnect(phydev); 998796bec1eSHeiko Stübner out_mdio: 999796bec1eSHeiko Stübner arc_mdio_remove(priv); 100088154c96SHeiko Stübner out_clken: 100123d2d9a6SRomain Perier if (priv->clk) 100288154c96SHeiko Stübner clk_disable_unprepare(priv->clk); 1003a94efbd7SPeter Chen out_put_node: 1004a94efbd7SPeter Chen of_node_put(phy_node); 1005a94efbd7SPeter Chen 1006e4f2379dSAlexey Brodkin return err; 1007e4f2379dSAlexey Brodkin } 100823d2d9a6SRomain Perier EXPORT_SYMBOL_GPL(arc_emac_probe); 1009e4f2379dSAlexey Brodkin 101023d2d9a6SRomain Perier int arc_emac_remove(struct net_device *ndev) 1011e4f2379dSAlexey Brodkin { 1012e4f2379dSAlexey Brodkin struct arc_emac_priv *priv = netdev_priv(ndev); 1013e4f2379dSAlexey Brodkin 101401dea536SPhilippe Reynes phy_disconnect(ndev->phydev); 1015e4f2379dSAlexey Brodkin arc_mdio_remove(priv); 1016e4f2379dSAlexey Brodkin unregister_netdev(ndev); 1017e4f2379dSAlexey Brodkin netif_napi_del(&priv->napi); 101888154c96SHeiko Stübner 1019663713ebSCaesar Wang if (!IS_ERR(priv->clk)) 102088154c96SHeiko Stübner clk_disable_unprepare(priv->clk); 
1021e4f2379dSAlexey Brodkin 1022e4f2379dSAlexey Brodkin return 0; 1023e4f2379dSAlexey Brodkin } 102423d2d9a6SRomain Perier EXPORT_SYMBOL_GPL(arc_emac_remove); 1025e4f2379dSAlexey Brodkin 1026e4f2379dSAlexey Brodkin MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>"); 1027e4f2379dSAlexey Brodkin MODULE_DESCRIPTION("ARC EMAC driver"); 1028e4f2379dSAlexey Brodkin MODULE_LICENSE("GPL"); 1029