// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
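/* A schematic sketch of the descriptor ring lifecycle described above
 * (illustrative pseudocode only, not driver API; RXBD_EMPTY, RXBD_WRAP
 * and "READY" stand in for the status bits used later in this file):
 *
 *	rx:	while (!(bd[i].status & RXBD_EMPTY)) {
 *			pass bd[i]'s buffer up the stack;
 *			attach a fresh buffer to bd[i];
 *			bd[i].status |= RXBD_EMPTY;	// hand back to hw
 *			i = (bd[i].status & RXBD_WRAP) ? 0 : i + 1;
 *		}
 *
 *	tx:	point bd[j] at the skb, mark it READY, then kick the
 *		DMA engine; the TXF interrupt later triggers cleanup.
 */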
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}
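/* Note on the ordering in gfar_init_rxbdp() above: bufPtr must be
 * globally visible before lstatus marks the descriptor RXBD_EMPTY,
 * because that flag is what hands ownership of the BD to the
 * controller; gfar_wmb() provides the required write barrier.
 */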
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
		stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}
}
/* Set the appropriate hash bit for the given addr */

/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the other 5
 * bits indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
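/* Worked example for the 8-bit (non-extended) hash above, assuming a
 * CRC whose top byte happens to be 0x2a (illustrative value only):
 * whichreg = 0x2a >> 5 = 1 and whichbit = 0x2a & 0x1f = 10, so the
 * entry lives in gaddr1 and the mask written is 1 << (31 - 10),
 * i.e. bit 10 counted in IBM (MSB-first) order.
 */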
/* There are multiple MAC Address register pairs on some controllers.
 * This function sets the num'th pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8) | addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}
static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;
	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}
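/* For instance (illustrative values, not the hardware defaults): a
 * tx_bit_map of 0x80 has only its MSB set, i.e. queue 0 by the
 * convention above; bitrev8(0x80) == 0x01, so for_each_set_bit()
 * reports bit 0 and gfar_parse_group() links priv->tx_queue[0] to
 * the group.
 */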
static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (of_node_name_eq(child, "queue-group"))
			num++;

	return num;
}

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	int err = 0, i;
	phy_interface_t interface;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2"))
		mode = MQ_MG_MODE;
	else
		mode = SQ_SG_MODE;

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		num_tx_qs = num_grps; /* one txq per int group */
		num_rx_qs = num_grps; /* one rxq per int group */
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;
	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (!of_node_name_eq(child, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err) {
				of_node_put(child);
				goto err_grp_init;
			}
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	err = of_get_mac_address(np, dev->dev_addr);
	if (err) {
		eth_hw_addr_random(dev);
		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
	}

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;
	/* Use PHY connection type from the DT node if one is specified there.
	 * rgmii-id really needs to be specified. Other types can be
	 * detected by hardware
	 */
	err = of_get_phy_mode(np, &interface);
	if (!err)
		priv->interface = interface;
	else
		priv->interface = gfar_get_interface(dev);

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}
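/* For reference, a minimal sketch of a device tree node that
 * gfar_of_init() above would accept. The property names are the ones
 * parsed in this file; the addresses, interrupt specifiers and
 * phandle are made-up examples, and board bindings add more:
 *
 *	ethernet@24000 {
 *		model = "eTSEC";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 04 9f 00 00 01 ];
 *		interrupts = <29 2 30 2 34 2>;	// TX, RX, ERR
 *		phy-handle = <&phy0>;
 *		phy-connection-type = "rgmii-id";
 *	};
 */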
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
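/* Layout note for the table built above (a summary of this code, not
 * of the reference manual): entry MAX_FILER_IDX holds the catch-all
 * match rule, each of the six cluster_entry_per_class() calls fills 4
 * entries working downward from there, and every entry below the
 * resulting cur_filer_idx is programmed as an explicit no-match
 * placeholder.
 */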
#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}
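/* The halt handshake above, in short: set the graceful-stop bits
 * (DMACTRL_GRS/DMACTRL_GTS), then poll gfar_is_dma_stopped() in a
 * bounded busy-wait; on parts with erratum A002 the 0xd1c heuristic in
 * __gfar_is_rx_idle() decides whether another polling round is needed
 * before the Rx side may be considered safe to reset.
 */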
/* Halt the receive and transmit queues */
static void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	int i;

	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;

	dev_kfree_skb(rx_queue->skb);

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];

		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;

		if (!rxb->page)
			continue;

		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rxb->page);

		rxb->page = NULL;
	}

	kfree(rx_queue->rx_buff);
	rx_queue->rx_buff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_buff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(dev->phydev);

	free_skb_resources(priv);
}
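/* Teardown ordering in stop_gfar() above matters: GFAR_DOWN is set
 * with explicit barriers around the atomic op before NAPI is disabled,
 * so in-flight pollers observe the flag; only after DMA is halted and
 * the PHY stopped are the skb resources freed.
 */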
1169ec21e2ecSJeff Kirsher 
1170ec21e2ecSJeff Kirsher for (i = 0; i < priv->num_grps; i++) {
1171ec21e2ecSJeff Kirsher regs = priv->gfargrp[i].regs;
1172ec21e2ecSJeff Kirsher /* Clear THLT/RHLT, so that the DMA starts polling now */
1173ec21e2ecSJeff Kirsher gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1174ec21e2ecSJeff Kirsher gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1175ec21e2ecSJeff Kirsher }
1176ec21e2ecSJeff Kirsher 
1177c10650b6SClaudiu Manoil /* Enable Rx/Tx DMA */
1178c10650b6SClaudiu Manoil tempval = gfar_read(&regs->maccfg1);
1179c10650b6SClaudiu Manoil tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1180c10650b6SClaudiu Manoil gfar_write(&regs->maccfg1, tempval);
1181c10650b6SClaudiu Manoil 
1182efeddce7SClaudiu Manoil gfar_ints_enable(priv);
1183efeddce7SClaudiu Manoil 
1184860e9538SFlorian Westphal netif_trans_update(priv->ndev); /* prevent tx timeout */
1185ec21e2ecSJeff Kirsher }
1186ec21e2ecSJeff Kirsher 
11877d993c5fSArseny Solokha static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
118880ec396cSClaudiu Manoil {
11897d993c5fSArseny Solokha struct page *page;
11907d993c5fSArseny Solokha dma_addr_t addr;
11917d993c5fSArseny Solokha 
11927d993c5fSArseny Solokha page = dev_alloc_page();
11937d993c5fSArseny Solokha if (unlikely(!page))
11947d993c5fSArseny Solokha return false;
11957d993c5fSArseny Solokha 
11967d993c5fSArseny Solokha addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
11977d993c5fSArseny Solokha if (unlikely(dma_mapping_error(rxq->dev, addr))) {
11987d993c5fSArseny Solokha __free_page(page);
11997d993c5fSArseny Solokha 
12007d993c5fSArseny Solokha return false;
120180ec396cSClaudiu Manoil }
120280ec396cSClaudiu Manoil 
12037d993c5fSArseny Solokha rxb->dma = addr;
12047d993c5fSArseny Solokha rxb->page = page;
12057d993c5fSArseny Solokha rxb->page_offset = 0;
12067d993c5fSArseny Solokha 
12077d993c5fSArseny Solokha return true;
12087d993c5fSArseny Solokha }
12097d993c5fSArseny Solokha 
12107d993c5fSArseny Solokha static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1211ec21e2ecSJeff Kirsher {
12127d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(rx_queue->ndev);
12137d993c5fSArseny Solokha struct gfar_extra_stats *estats = &priv->extra_stats;
1214ec21e2ecSJeff Kirsher 
12157d993c5fSArseny Solokha netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
12167d993c5fSArseny Solokha atomic64_inc(&estats->rx_alloc_err);
1217ec21e2ecSJeff Kirsher }
1218ec21e2ecSJeff Kirsher 
12197d993c5fSArseny Solokha static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
12207d993c5fSArseny Solokha int alloc_cnt)
122180ec396cSClaudiu Manoil {
12227d993c5fSArseny Solokha struct rxbd8 *bdp;
12237d993c5fSArseny Solokha struct gfar_rx_buff *rxb;
122480ec396cSClaudiu Manoil int i;
122580ec396cSClaudiu Manoil 
12267d993c5fSArseny Solokha i = rx_queue->next_to_use;
12277d993c5fSArseny Solokha bdp = &rx_queue->rx_bd_base[i];
12287d993c5fSArseny Solokha rxb = &rx_queue->rx_buff[i];
12297d993c5fSArseny Solokha 
12307d993c5fSArseny Solokha while (alloc_cnt--) {
12317d993c5fSArseny Solokha /* try reuse page */
12327d993c5fSArseny Solokha if (unlikely(!rxb->page)) {
12337d993c5fSArseny Solokha if (unlikely(!gfar_new_page(rx_queue, rxb))) {
12347d993c5fSArseny Solokha gfar_rx_alloc_err(rx_queue);
12357d993c5fSArseny Solokha break;
123680ec396cSClaudiu Manoil }
123780ec396cSClaudiu Manoil }
123880ec396cSClaudiu Manoil 
12397d993c5fSArseny Solokha /* Setup the new RxBD */
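/* Each page is used as two GFAR_RXB_TRUESIZE halves; page_offset
 * selects the half currently owned by hardware and is flipped when a
 * buffer is recycled (see gfar_add_rx_frag()). The RXBUF_ALIGNMENT
 * bytes skipped here are the headroom later claimed by the
 * skb_reserve() in gfar_get_next_rxbuff().
 */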
12407d993c5fSArseny Solokha gfar_init_rxbdp(rx_queue, bdp,
12417d993c5fSArseny Solokha rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
12427d993c5fSArseny Solokha 
12437d993c5fSArseny Solokha /* Update to the next pointer */
12447d993c5fSArseny Solokha bdp++;
12457d993c5fSArseny Solokha rxb++;
12467d993c5fSArseny Solokha 
12477d993c5fSArseny Solokha if (unlikely(++i == rx_queue->rx_ring_size)) {
12487d993c5fSArseny Solokha i = 0;
12497d993c5fSArseny Solokha bdp = rx_queue->rx_bd_base;
12507d993c5fSArseny Solokha rxb = rx_queue->rx_buff;
12517d993c5fSArseny Solokha }
12527d993c5fSArseny Solokha }
12537d993c5fSArseny Solokha 
12547d993c5fSArseny Solokha rx_queue->next_to_use = i;
12557d993c5fSArseny Solokha rx_queue->next_to_alloc = i;
12567d993c5fSArseny Solokha }
12577d993c5fSArseny Solokha 
12587d993c5fSArseny Solokha static void gfar_init_bds(struct net_device *ndev)
125980ec396cSClaudiu Manoil {
12607d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(ndev);
12617d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
12627d993c5fSArseny Solokha struct gfar_priv_tx_q *tx_queue = NULL;
12637d993c5fSArseny Solokha struct gfar_priv_rx_q *rx_queue = NULL;
12647d993c5fSArseny Solokha struct txbd8 *txbdp;
12657d993c5fSArseny Solokha u32 __iomem *rfbptr;
12667d993c5fSArseny Solokha int i, j;
126780ec396cSClaudiu Manoil 
12687d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) {
12697d993c5fSArseny Solokha tx_queue = priv->tx_queue[i];
12707d993c5fSArseny Solokha /* Initialize some variables in our dev structure */
12717d993c5fSArseny Solokha tx_queue->num_txbdfree = tx_queue->tx_ring_size;
12727d993c5fSArseny Solokha tx_queue->dirty_tx = tx_queue->tx_bd_base;
12737d993c5fSArseny Solokha tx_queue->cur_tx = tx_queue->tx_bd_base;
12747d993c5fSArseny Solokha tx_queue->skb_curtx = 0;
12757d993c5fSArseny Solokha tx_queue->skb_dirtytx = 0;
12767d993c5fSArseny Solokha 
12777d993c5fSArseny Solokha /* Initialize Transmit Descriptor Ring */
12787d993c5fSArseny Solokha txbdp = tx_queue->tx_bd_base;
12797d993c5fSArseny Solokha for (j = 0; j < tx_queue->tx_ring_size; j++) {
12807d993c5fSArseny Solokha txbdp->lstatus = 0;
12817d993c5fSArseny Solokha txbdp->bufPtr = 0;
12827d993c5fSArseny Solokha txbdp++;
12837d993c5fSArseny Solokha }
12847d993c5fSArseny Solokha 
12857d993c5fSArseny Solokha /* Set the last descriptor in the ring to indicate wrap */
12867d993c5fSArseny Solokha txbdp--;
12877d993c5fSArseny Solokha txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
12887d993c5fSArseny Solokha TXBD_WRAP);
12897d993c5fSArseny Solokha }
12907d993c5fSArseny Solokha 
12917d993c5fSArseny Solokha rfbptr = &regs->rfbptr0;
12927d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) {
12937d993c5fSArseny Solokha rx_queue = priv->rx_queue[i];
12947d993c5fSArseny Solokha 
12957d993c5fSArseny Solokha rx_queue->next_to_clean = 0;
12967d993c5fSArseny Solokha rx_queue->next_to_use = 0;
12977d993c5fSArseny Solokha rx_queue->next_to_alloc = 0;
12987d993c5fSArseny Solokha 
12997d993c5fSArseny Solokha /* make sure next_to_clean != next_to_use after this
13007d993c5fSArseny Solokha * by leaving at least 1 unused descriptor
13017d993c5fSArseny Solokha */
13027d993c5fSArseny Solokha gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
13037d993c5fSArseny Solokha 
13047d993c5fSArseny Solokha rx_queue->rfbptr = rfbptr;
13057d993c5fSArseny Solokha rfbptr += 2;
130680ec396cSClaudiu Manoil }
130780ec396cSClaudiu Manoil }
130880ec396cSClaudiu Manoil 
13097d993c5fSArseny Solokha static int gfar_alloc_skb_resources(struct
net_device *ndev) 13107d993c5fSArseny Solokha { 13117d993c5fSArseny Solokha void *vaddr; 13127d993c5fSArseny Solokha dma_addr_t addr; 13137d993c5fSArseny Solokha int i, j; 13147d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(ndev); 13157d993c5fSArseny Solokha struct device *dev = priv->dev; 13167d993c5fSArseny Solokha struct gfar_priv_tx_q *tx_queue = NULL; 13177d993c5fSArseny Solokha struct gfar_priv_rx_q *rx_queue = NULL; 13187d993c5fSArseny Solokha 13197d993c5fSArseny Solokha priv->total_tx_ring_size = 0; 13207d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) 13217d993c5fSArseny Solokha priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; 13227d993c5fSArseny Solokha 13237d993c5fSArseny Solokha priv->total_rx_ring_size = 0; 13247d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) 13257d993c5fSArseny Solokha priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; 13267d993c5fSArseny Solokha 13277d993c5fSArseny Solokha /* Allocate memory for the buffer descriptors */ 13287d993c5fSArseny Solokha vaddr = dma_alloc_coherent(dev, 13297d993c5fSArseny Solokha (priv->total_tx_ring_size * 13307d993c5fSArseny Solokha sizeof(struct txbd8)) + 13317d993c5fSArseny Solokha (priv->total_rx_ring_size * 13327d993c5fSArseny Solokha sizeof(struct rxbd8)), 13337d993c5fSArseny Solokha &addr, GFP_KERNEL); 13347d993c5fSArseny Solokha if (!vaddr) 13357d993c5fSArseny Solokha return -ENOMEM; 13367d993c5fSArseny Solokha 13377d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) { 13387d993c5fSArseny Solokha tx_queue = priv->tx_queue[i]; 13397d993c5fSArseny Solokha tx_queue->tx_bd_base = vaddr; 13407d993c5fSArseny Solokha tx_queue->tx_bd_dma_base = addr; 13417d993c5fSArseny Solokha tx_queue->dev = ndev; 13427d993c5fSArseny Solokha /* enet DMA only understands physical addresses */ 13437d993c5fSArseny Solokha addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; 13447d993c5fSArseny Solokha vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; 13457d993c5fSArseny Solokha } 13467d993c5fSArseny Solokha 13477d993c5fSArseny Solokha /* Start the rx descriptor ring where the tx ring leaves off */ 13487d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) { 13497d993c5fSArseny Solokha rx_queue = priv->rx_queue[i]; 13507d993c5fSArseny Solokha rx_queue->rx_bd_base = vaddr; 13517d993c5fSArseny Solokha rx_queue->rx_bd_dma_base = addr; 13527d993c5fSArseny Solokha rx_queue->ndev = ndev; 13537d993c5fSArseny Solokha rx_queue->dev = dev; 13547d993c5fSArseny Solokha addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; 13557d993c5fSArseny Solokha vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; 13567d993c5fSArseny Solokha } 13577d993c5fSArseny Solokha 13587d993c5fSArseny Solokha /* Setup the skbuff rings */ 13597d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) { 13607d993c5fSArseny Solokha tx_queue = priv->tx_queue[i]; 13617d993c5fSArseny Solokha tx_queue->tx_skbuff = 13627d993c5fSArseny Solokha kmalloc_array(tx_queue->tx_ring_size, 13637d993c5fSArseny Solokha sizeof(*tx_queue->tx_skbuff), 13647d993c5fSArseny Solokha GFP_KERNEL); 13657d993c5fSArseny Solokha if (!tx_queue->tx_skbuff) 13667d993c5fSArseny Solokha goto cleanup; 13677d993c5fSArseny Solokha 13687d993c5fSArseny Solokha for (j = 0; j < tx_queue->tx_ring_size; j++) 13697d993c5fSArseny Solokha tx_queue->tx_skbuff[j] = NULL; 13707d993c5fSArseny Solokha } 13717d993c5fSArseny Solokha 13727d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) { 13737d993c5fSArseny 
Solokha rx_queue = priv->rx_queue[i]; 13747d993c5fSArseny Solokha rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, 13757d993c5fSArseny Solokha sizeof(*rx_queue->rx_buff), 13767d993c5fSArseny Solokha GFP_KERNEL); 13777d993c5fSArseny Solokha if (!rx_queue->rx_buff) 13787d993c5fSArseny Solokha goto cleanup; 13797d993c5fSArseny Solokha } 13807d993c5fSArseny Solokha 13817d993c5fSArseny Solokha gfar_init_bds(ndev); 13827d993c5fSArseny Solokha 138380ec396cSClaudiu Manoil return 0; 13847d993c5fSArseny Solokha 13857d993c5fSArseny Solokha cleanup: 13867d993c5fSArseny Solokha free_skb_resources(priv); 13877d993c5fSArseny Solokha return -ENOMEM; 138880ec396cSClaudiu Manoil } 138980ec396cSClaudiu Manoil 1390ec21e2ecSJeff Kirsher /* Bring the controller up and running */ 1391ec21e2ecSJeff Kirsher int startup_gfar(struct net_device *ndev) 1392ec21e2ecSJeff Kirsher { 1393ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(ndev); 139480ec396cSClaudiu Manoil int err; 1395ec21e2ecSJeff Kirsher 1396a328ac92SClaudiu Manoil gfar_mac_reset(priv); 1397ec21e2ecSJeff Kirsher 1398ec21e2ecSJeff Kirsher err = gfar_alloc_skb_resources(ndev); 1399ec21e2ecSJeff Kirsher if (err) 1400ec21e2ecSJeff Kirsher return err; 1401ec21e2ecSJeff Kirsher 1402a328ac92SClaudiu Manoil gfar_init_tx_rx_base(priv); 1403ec21e2ecSJeff Kirsher 14044e857c58SPeter Zijlstra smp_mb__before_atomic(); 14050851133bSClaudiu Manoil clear_bit(GFAR_DOWN, &priv->state); 14064e857c58SPeter Zijlstra smp_mb__after_atomic(); 14070851133bSClaudiu Manoil 14080851133bSClaudiu Manoil /* Start Rx/Tx DMA and enable the interrupts */ 1409c10650b6SClaudiu Manoil gfar_start(priv); 1410ec21e2ecSJeff Kirsher 14112a4eebf0SClaudiu Manoil /* force link state update after mac reset */ 14122a4eebf0SClaudiu Manoil priv->oldlink = 0; 14132a4eebf0SClaudiu Manoil priv->oldspeed = 0; 14142a4eebf0SClaudiu Manoil priv->oldduplex = -1; 14152a4eebf0SClaudiu Manoil 14164c4a6b0eSPhilippe Reynes phy_start(ndev->phydev); 1417ec21e2ecSJeff Kirsher 14180851133bSClaudiu Manoil enable_napi(priv); 14190851133bSClaudiu Manoil 14200851133bSClaudiu Manoil netif_tx_wake_all_queues(ndev); 14210851133bSClaudiu Manoil 1422ec21e2ecSJeff Kirsher return 0; 1423ec21e2ecSJeff Kirsher } 1424ec21e2ecSJeff Kirsher 14257d993c5fSArseny Solokha static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) 14267d993c5fSArseny Solokha { 14277d993c5fSArseny Solokha struct net_device *ndev = priv->ndev; 14287d993c5fSArseny Solokha struct phy_device *phydev = ndev->phydev; 14297d993c5fSArseny Solokha u32 val = 0; 14307d993c5fSArseny Solokha 14317d993c5fSArseny Solokha if (!phydev->duplex) 14327d993c5fSArseny Solokha return val; 14337d993c5fSArseny Solokha 14347d993c5fSArseny Solokha if (!priv->pause_aneg_en) { 14357d993c5fSArseny Solokha if (priv->tx_pause_en) 14367d993c5fSArseny Solokha val |= MACCFG1_TX_FLOW; 14377d993c5fSArseny Solokha if (priv->rx_pause_en) 14387d993c5fSArseny Solokha val |= MACCFG1_RX_FLOW; 14397d993c5fSArseny Solokha } else { 14407d993c5fSArseny Solokha u16 lcl_adv, rmt_adv; 14417d993c5fSArseny Solokha u8 flowctrl; 14427d993c5fSArseny Solokha /* get link partner capabilities */ 14437d993c5fSArseny Solokha rmt_adv = 0; 14447d993c5fSArseny Solokha if (phydev->pause) 14457d993c5fSArseny Solokha rmt_adv = LPA_PAUSE_CAP; 14467d993c5fSArseny Solokha if (phydev->asym_pause) 14477d993c5fSArseny Solokha rmt_adv |= LPA_PAUSE_ASYM; 14487d993c5fSArseny Solokha 14497d993c5fSArseny Solokha lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); 14507d993c5fSArseny Solokha flowctrl = 
mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
14517d993c5fSArseny Solokha if (flowctrl & FLOW_CTRL_TX)
14527d993c5fSArseny Solokha val |= MACCFG1_TX_FLOW;
14537d993c5fSArseny Solokha if (flowctrl & FLOW_CTRL_RX)
14547d993c5fSArseny Solokha val |= MACCFG1_RX_FLOW;
14557d993c5fSArseny Solokha }
14567d993c5fSArseny Solokha 
14577d993c5fSArseny Solokha return val;
14587d993c5fSArseny Solokha }
14597d993c5fSArseny Solokha 
14607d993c5fSArseny Solokha static noinline void gfar_update_link_state(struct gfar_private *priv)
14617d993c5fSArseny Solokha {
14627d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
14637d993c5fSArseny Solokha struct net_device *ndev = priv->ndev;
14647d993c5fSArseny Solokha struct phy_device *phydev = ndev->phydev;
14657d993c5fSArseny Solokha struct gfar_priv_rx_q *rx_queue = NULL;
14667d993c5fSArseny Solokha int i;
14677d993c5fSArseny Solokha 
14687d993c5fSArseny Solokha if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
14697d993c5fSArseny Solokha return;
14707d993c5fSArseny Solokha 
14717d993c5fSArseny Solokha if (phydev->link) {
14727d993c5fSArseny Solokha u32 tempval1 = gfar_read(&regs->maccfg1);
14737d993c5fSArseny Solokha u32 tempval = gfar_read(&regs->maccfg2);
14747d993c5fSArseny Solokha u32 ecntrl = gfar_read(&regs->ecntrl);
14757d993c5fSArseny Solokha u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
14767d993c5fSArseny Solokha 
14777d993c5fSArseny Solokha if (phydev->duplex != priv->oldduplex) {
14787d993c5fSArseny Solokha if (!(phydev->duplex))
14797d993c5fSArseny Solokha tempval &= ~(MACCFG2_FULL_DUPLEX);
14807d993c5fSArseny Solokha else
14817d993c5fSArseny Solokha tempval |= MACCFG2_FULL_DUPLEX;
14827d993c5fSArseny Solokha 
14837d993c5fSArseny Solokha priv->oldduplex = phydev->duplex;
14847d993c5fSArseny Solokha }
14857d993c5fSArseny Solokha 
14867d993c5fSArseny Solokha if (phydev->speed != priv->oldspeed) {
14877d993c5fSArseny Solokha switch (phydev->speed) {
14887d993c5fSArseny Solokha case 1000:
14897d993c5fSArseny Solokha tempval =
14907d993c5fSArseny Solokha ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
14917d993c5fSArseny Solokha 
14927d993c5fSArseny Solokha ecntrl &= ~(ECNTRL_R100);
14937d993c5fSArseny Solokha break;
14947d993c5fSArseny Solokha case 100:
14957d993c5fSArseny Solokha case 10:
14967d993c5fSArseny Solokha tempval =
14977d993c5fSArseny Solokha ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
14987d993c5fSArseny Solokha 
14997d993c5fSArseny Solokha /* Reduced mode distinguishes
15007d993c5fSArseny Solokha * between 10 and 100
15010977f817SJan Ceuleers */
15027d993c5fSArseny Solokha if (phydev->speed == SPEED_100)
15037d993c5fSArseny Solokha ecntrl |= ECNTRL_R100;
15047d993c5fSArseny Solokha else
15057d993c5fSArseny Solokha ecntrl &= ~(ECNTRL_R100);
15067d993c5fSArseny Solokha break;
15077d993c5fSArseny Solokha default:
15087d993c5fSArseny Solokha netif_warn(priv, link, priv->ndev,
15097d993c5fSArseny Solokha "Ack! Speed (%d) is not 10/100/1000!\n",
15107d993c5fSArseny Solokha phydev->speed);
15117d993c5fSArseny Solokha break;
15127d993c5fSArseny Solokha }
15137d993c5fSArseny Solokha 
15147d993c5fSArseny Solokha priv->oldspeed = phydev->speed;
15157d993c5fSArseny Solokha }
15167d993c5fSArseny Solokha 
15177d993c5fSArseny Solokha tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
15187d993c5fSArseny Solokha tempval1 |= gfar_get_flowctrl_cfg(priv);
15197d993c5fSArseny Solokha 
15207d993c5fSArseny Solokha /* Turn last free buffer recording on */
15217d993c5fSArseny Solokha if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
15227d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) {
15237d993c5fSArseny Solokha u32 bdp_dma;
15247d993c5fSArseny Solokha 
15257d993c5fSArseny Solokha rx_queue = priv->rx_queue[i];
15267d993c5fSArseny Solokha bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
15277d993c5fSArseny Solokha gfar_write(rx_queue->rfbptr, bdp_dma);
15287d993c5fSArseny Solokha }
15297d993c5fSArseny Solokha 
15307d993c5fSArseny Solokha priv->tx_actual_en = 1;
15317d993c5fSArseny Solokha }
15327d993c5fSArseny Solokha 
15337d993c5fSArseny Solokha if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
15347d993c5fSArseny Solokha priv->tx_actual_en = 0;
15357d993c5fSArseny Solokha 
15367d993c5fSArseny Solokha gfar_write(&regs->maccfg1, tempval1);
15377d993c5fSArseny Solokha gfar_write(&regs->maccfg2, tempval);
15387d993c5fSArseny Solokha gfar_write(&regs->ecntrl, ecntrl);
15397d993c5fSArseny Solokha 
15407d993c5fSArseny Solokha if (!priv->oldlink)
15417d993c5fSArseny Solokha priv->oldlink = 1;
15427d993c5fSArseny Solokha 
15437d993c5fSArseny Solokha } else if (priv->oldlink) {
15447d993c5fSArseny Solokha priv->oldlink = 0;
15457d993c5fSArseny Solokha priv->oldspeed = 0;
15467d993c5fSArseny Solokha priv->oldduplex = -1;
15477d993c5fSArseny Solokha }
15487d993c5fSArseny Solokha 
15497d993c5fSArseny Solokha if (netif_msg_link(priv))
15507d993c5fSArseny Solokha phy_print_status(phydev);
15517d993c5fSArseny Solokha }
15527d993c5fSArseny Solokha 
15537d993c5fSArseny Solokha /* Called every time the controller might need to be made
15547d993c5fSArseny Solokha * aware of new link state. The PHY code conveys this
15557d993c5fSArseny Solokha * information through variables in the phydev structure, and this
15567d993c5fSArseny Solokha * function converts those variables into the appropriate
15577d993c5fSArseny Solokha * register values, and can bring down the device if needed.
15587d993c5fSArseny Solokha */
15597d993c5fSArseny Solokha static void adjust_link(struct net_device *dev)
1560ec21e2ecSJeff Kirsher {
1561ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
15627d993c5fSArseny Solokha struct phy_device *phydev = dev->phydev;
1563ec21e2ecSJeff Kirsher 
15647d993c5fSArseny Solokha if (unlikely(phydev->link != priv->oldlink ||
15657d993c5fSArseny Solokha (phydev->link && (phydev->duplex != priv->oldduplex ||
15667d993c5fSArseny Solokha phydev->speed != priv->oldspeed))))
15677d993c5fSArseny Solokha gfar_update_link_state(priv);
15687d993c5fSArseny Solokha }
1569ec21e2ecSJeff Kirsher 
15707d993c5fSArseny Solokha /* Initialize TBI PHY interface for communicating with the
15717d993c5fSArseny Solokha * SERDES lynx PHY on the chip. We communicate with this PHY
15727d993c5fSArseny Solokha * through the MDIO bus on each controller, treating it as a
15737d993c5fSArseny Solokha * "normal" PHY at the address found in the TBIPA register. We assume
15747d993c5fSArseny Solokha * that the TBIPA register is valid. Either the MDIO bus code will set
15757d993c5fSArseny Solokha * it to a value that doesn't conflict with other PHYs on the bus, or the
15767d993c5fSArseny Solokha * value doesn't matter, as there are no other PHYs on the bus.
15777d993c5fSArseny Solokha */
15787d993c5fSArseny Solokha static void gfar_configure_serdes(struct net_device *dev)
15797d993c5fSArseny Solokha {
15807d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(dev);
15817d993c5fSArseny Solokha struct phy_device *tbiphy;
158280ec396cSClaudiu Manoil 
15837d993c5fSArseny Solokha if (!priv->tbi_node) {
15847d993c5fSArseny Solokha dev_warn(&dev->dev, "error: SGMII mode requires that the "
15857d993c5fSArseny Solokha "device tree specify a tbi-handle\n");
15867d993c5fSArseny Solokha return;
15877d993c5fSArseny Solokha }
1588ec21e2ecSJeff Kirsher 
15897d993c5fSArseny Solokha tbiphy = of_phy_find_device(priv->tbi_node);
15907d993c5fSArseny Solokha if (!tbiphy) {
15917d993c5fSArseny Solokha dev_err(&dev->dev, "error: Could not get TBI device\n");
15927d993c5fSArseny Solokha return;
15937d993c5fSArseny Solokha }
15947d993c5fSArseny Solokha 
15957d993c5fSArseny Solokha /* If the link is already up, we must already be ok, and don't need to
15967d993c5fSArseny Solokha * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
15977d993c5fSArseny Solokha * everything for us? Resetting it takes the link down and requires
15987d993c5fSArseny Solokha * several seconds for it to come back.
15997d993c5fSArseny Solokha */
16007d993c5fSArseny Solokha if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
16017d993c5fSArseny Solokha put_device(&tbiphy->mdio.dev);
16027d993c5fSArseny Solokha return;
16037d993c5fSArseny Solokha }
16047d993c5fSArseny Solokha 
16057d993c5fSArseny Solokha /* Single clk mode, mii mode off (for serdes communication) */
16067d993c5fSArseny Solokha phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
16077d993c5fSArseny Solokha 
16087d993c5fSArseny Solokha phy_write(tbiphy, MII_ADVERTISE,
16097d993c5fSArseny Solokha ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
16107d993c5fSArseny Solokha ADVERTISE_1000XPSE_ASYM);
16117d993c5fSArseny Solokha 
16127d993c5fSArseny Solokha phy_write(tbiphy, MII_BMCR,
16137d993c5fSArseny Solokha BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
16147d993c5fSArseny Solokha BMCR_SPEED1000);
16157d993c5fSArseny Solokha 
16167d993c5fSArseny Solokha put_device(&tbiphy->mdio.dev);
16177d993c5fSArseny Solokha }
16187d993c5fSArseny Solokha 
16197d993c5fSArseny Solokha /* Initializes driver's PHY state, and attaches to the PHY.
16207d993c5fSArseny Solokha * Returns 0 on success.
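 * Note: the mask built below limits the PHY's supported/advertised
 * modes to 10/100 (plus 1000baseT-Full when the controller has
 * FSL_GIANFAR_DEV_HAS_GIGABIT), and pause frames are enabled via
 * phy_support_asym_pause().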
16217d993c5fSArseny Solokha */
16227d993c5fSArseny Solokha static int init_phy(struct net_device *dev)
16237d993c5fSArseny Solokha {
16247d993c5fSArseny Solokha __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
16257d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(dev);
16268e578e73SArseny Solokha phy_interface_t interface = priv->interface;
16277d993c5fSArseny Solokha struct phy_device *phydev;
16287d993c5fSArseny Solokha struct ethtool_eee edata;
16297d993c5fSArseny Solokha 
16307d993c5fSArseny Solokha linkmode_set_bit_array(phy_10_100_features_array,
16317d993c5fSArseny Solokha ARRAY_SIZE(phy_10_100_features_array),
16327d993c5fSArseny Solokha mask);
16337d993c5fSArseny Solokha linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
16347d993c5fSArseny Solokha linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
16357d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
16367d993c5fSArseny Solokha linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
16377d993c5fSArseny Solokha 
16387d993c5fSArseny Solokha priv->oldlink = 0;
16397d993c5fSArseny Solokha priv->oldspeed = 0;
16407d993c5fSArseny Solokha priv->oldduplex = -1;
16417d993c5fSArseny Solokha 
16427d993c5fSArseny Solokha phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
16437d993c5fSArseny Solokha interface);
16447d993c5fSArseny Solokha if (!phydev) {
16457d993c5fSArseny Solokha dev_err(&dev->dev, "could not attach to PHY\n");
16467d993c5fSArseny Solokha return -ENODEV;
16477d993c5fSArseny Solokha }
16487d993c5fSArseny Solokha 
16497d993c5fSArseny Solokha if (interface == PHY_INTERFACE_MODE_SGMII)
16507d993c5fSArseny Solokha gfar_configure_serdes(dev);
16517d993c5fSArseny Solokha 
16527d993c5fSArseny Solokha /* Remove any features not supported by the controller */
16537d993c5fSArseny Solokha linkmode_and(phydev->supported, phydev->supported, mask);
16547d993c5fSArseny Solokha linkmode_copy(phydev->advertising, phydev->supported);
16557d993c5fSArseny Solokha 
16567d993c5fSArseny Solokha /* Add support for flow control */
16577d993c5fSArseny Solokha phy_support_asym_pause(phydev);
16587d993c5fSArseny Solokha 
16597d993c5fSArseny Solokha /* disable EEE autoneg, EEE not supported by eTSEC */
16607d993c5fSArseny Solokha memset(&edata, 0, sizeof(struct ethtool_eee));
16617d993c5fSArseny Solokha phy_ethtool_set_eee(phydev, &edata);
16627d993c5fSArseny Solokha 
16637d993c5fSArseny Solokha return 0;
1664ec21e2ecSJeff Kirsher }
1665ec21e2ecSJeff Kirsher 
1666ec21e2ecSJeff Kirsher static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1667ec21e2ecSJeff Kirsher {
1668d58ff351SJohannes Berg struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1669ec21e2ecSJeff Kirsher 
1670ec21e2ecSJeff Kirsher memset(fcb, 0, GMAC_FCB_LEN);
1671ec21e2ecSJeff Kirsher 
1672ec21e2ecSJeff Kirsher return fcb;
1673ec21e2ecSJeff Kirsher }
1674ec21e2ecSJeff Kirsher 
16759c4886e5SManfred Rudigier static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
16769c4886e5SManfred Rudigier int fcb_length)
1677ec21e2ecSJeff Kirsher {
1678ec21e2ecSJeff Kirsher /* If we're here, it's an IP packet with a TCP or UDP
1679ec21e2ecSJeff Kirsher * payload.
We set it to checksum, using a pseudo-header 1680ec21e2ecSJeff Kirsher * we provide 1681ec21e2ecSJeff Kirsher */ 16823a2e16c8SJan Ceuleers u8 flags = TXFCB_DEFAULT; 1683ec21e2ecSJeff Kirsher 16840977f817SJan Ceuleers /* Tell the controller what the protocol is 16850977f817SJan Ceuleers * And provide the already calculated phcs 16860977f817SJan Ceuleers */ 1687ec21e2ecSJeff Kirsher if (ip_hdr(skb)->protocol == IPPROTO_UDP) { 1688ec21e2ecSJeff Kirsher flags |= TXFCB_UDP; 168926eb9374SClaudiu Manoil fcb->phcs = (__force __be16)(udp_hdr(skb)->check); 1690ec21e2ecSJeff Kirsher } else 169126eb9374SClaudiu Manoil fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); 1692ec21e2ecSJeff Kirsher 1693ec21e2ecSJeff Kirsher /* l3os is the distance between the start of the 1694ec21e2ecSJeff Kirsher * frame (skb->data) and the start of the IP hdr. 1695ec21e2ecSJeff Kirsher * l4os is the distance between the start of the 16960977f817SJan Ceuleers * l3 hdr and the l4 hdr 16970977f817SJan Ceuleers */ 169826eb9374SClaudiu Manoil fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); 1699ec21e2ecSJeff Kirsher fcb->l4os = skb_network_header_len(skb); 1700ec21e2ecSJeff Kirsher 1701ec21e2ecSJeff Kirsher fcb->flags = flags; 1702ec21e2ecSJeff Kirsher } 1703ec21e2ecSJeff Kirsher 1704278af574SArnd Bergmann static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 1705ec21e2ecSJeff Kirsher { 1706ec21e2ecSJeff Kirsher fcb->flags |= TXFCB_VLN; 170726eb9374SClaudiu Manoil fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); 1708ec21e2ecSJeff Kirsher } 1709ec21e2ecSJeff Kirsher 1710ec21e2ecSJeff Kirsher static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, 1711ec21e2ecSJeff Kirsher struct txbd8 *base, int ring_size) 1712ec21e2ecSJeff Kirsher { 1713ec21e2ecSJeff Kirsher struct txbd8 *new_bd = bdp + stride; 1714ec21e2ecSJeff Kirsher 1715ec21e2ecSJeff Kirsher return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; 1716ec21e2ecSJeff Kirsher } 1717ec21e2ecSJeff Kirsher 1718ec21e2ecSJeff Kirsher static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, 1719ec21e2ecSJeff Kirsher int ring_size) 1720ec21e2ecSJeff Kirsher { 1721ec21e2ecSJeff Kirsher return skip_txbd(bdp, 1, base, ring_size); 1722ec21e2ecSJeff Kirsher } 1723ec21e2ecSJeff Kirsher 172402d88fb4SClaudiu Manoil /* eTSEC12: csum generation not supported for some fcb offsets */ 172502d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_12(struct gfar_private *priv, 172602d88fb4SClaudiu Manoil unsigned long fcb_addr) 172702d88fb4SClaudiu Manoil { 172802d88fb4SClaudiu Manoil return (gfar_has_errata(priv, GFAR_ERRATA_12) && 172902d88fb4SClaudiu Manoil (fcb_addr % 0x20) > 0x18); 173002d88fb4SClaudiu Manoil } 173102d88fb4SClaudiu Manoil 173202d88fb4SClaudiu Manoil /* eTSEC76: csum generation for frames larger than 2500 may 173302d88fb4SClaudiu Manoil * cause excess delays before start of transmission 173402d88fb4SClaudiu Manoil */ 173502d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_76(struct gfar_private *priv, 173602d88fb4SClaudiu Manoil unsigned int len) 173702d88fb4SClaudiu Manoil { 173802d88fb4SClaudiu Manoil return (gfar_has_errata(priv, GFAR_ERRATA_76) && 173902d88fb4SClaudiu Manoil (len > 2500)); 174002d88fb4SClaudiu Manoil } 174102d88fb4SClaudiu Manoil 17420977f817SJan Ceuleers /* This is called by the kernel when a frame is ready for transmission. 
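 * In outline: an optional FCB is pushed in front of the frame for
 * checksum offload, VLAN insertion and timestamping, the linear part
 * and any page fragments are mapped onto TxBDs, and the DMA engine is
 * then kicked through the TSTAT register.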
17430977f817SJan Ceuleers * It is pointed to by the dev->hard_start_xmit function pointer 17440977f817SJan Ceuleers */ 174506983aa5SYueHaibing static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1746ec21e2ecSJeff Kirsher { 1747ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev); 1748ec21e2ecSJeff Kirsher struct gfar_priv_tx_q *tx_queue = NULL; 1749ec21e2ecSJeff Kirsher struct netdev_queue *txq; 1750ec21e2ecSJeff Kirsher struct gfar __iomem *regs = NULL; 1751ec21e2ecSJeff Kirsher struct txfcb *fcb = NULL; 1752ec21e2ecSJeff Kirsher struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; 1753ec21e2ecSJeff Kirsher u32 lstatus; 175442f397adSClaudiu Manoil skb_frag_t *frag; 17550d0cffdcSClaudiu Manoil int i, rq = 0; 17560d0cffdcSClaudiu Manoil int do_tstamp, do_csum, do_vlan; 1757ec21e2ecSJeff Kirsher u32 bufaddr; 175850ad076bSClaudiu Manoil unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; 1759ec21e2ecSJeff Kirsher 1760ec21e2ecSJeff Kirsher rq = skb->queue_mapping; 1761ec21e2ecSJeff Kirsher tx_queue = priv->tx_queue[rq]; 1762ec21e2ecSJeff Kirsher txq = netdev_get_tx_queue(dev, rq); 1763ec21e2ecSJeff Kirsher base = tx_queue->tx_bd_base; 1764ec21e2ecSJeff Kirsher regs = tx_queue->grp->regs; 1765ec21e2ecSJeff Kirsher 17660d0cffdcSClaudiu Manoil do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); 1767df8a39deSJiri Pirko do_vlan = skb_vlan_tag_present(skb); 17680d0cffdcSClaudiu Manoil do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 17690d0cffdcSClaudiu Manoil priv->hwts_tx_en; 17700d0cffdcSClaudiu Manoil 17710d0cffdcSClaudiu Manoil if (do_csum || do_vlan) 17720d0cffdcSClaudiu Manoil fcb_len = GMAC_FCB_LEN; 17730d0cffdcSClaudiu Manoil 1774ec21e2ecSJeff Kirsher /* check if time stamp should be generated */ 17750d0cffdcSClaudiu Manoil if (unlikely(do_tstamp)) 17760d0cffdcSClaudiu Manoil fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; 1777ec21e2ecSJeff Kirsher 1778ec21e2ecSJeff Kirsher /* make space for additional header when fcb is needed */ 1779d145c903SClaudiu Manoil if (fcb_len) { 1780d145c903SClaudiu Manoil if (unlikely(skb_cow_head(skb, fcb_len))) { 1781ec21e2ecSJeff Kirsher dev->stats.tx_errors++; 1782c9974ad4SEric W. 
Biederman dev_kfree_skb_any(skb); 1783ec21e2ecSJeff Kirsher return NETDEV_TX_OK; 1784ec21e2ecSJeff Kirsher } 1785ec21e2ecSJeff Kirsher } 1786ec21e2ecSJeff Kirsher 1787ec21e2ecSJeff Kirsher /* total number of fragments in the SKB */ 1788ec21e2ecSJeff Kirsher nr_frags = skb_shinfo(skb)->nr_frags; 1789ec21e2ecSJeff Kirsher 1790ec21e2ecSJeff Kirsher /* calculate the required number of TxBDs for this skb */ 1791ec21e2ecSJeff Kirsher if (unlikely(do_tstamp)) 1792ec21e2ecSJeff Kirsher nr_txbds = nr_frags + 2; 1793ec21e2ecSJeff Kirsher else 1794ec21e2ecSJeff Kirsher nr_txbds = nr_frags + 1; 1795ec21e2ecSJeff Kirsher 1796ec21e2ecSJeff Kirsher /* check if there is space to queue this packet */ 1797ec21e2ecSJeff Kirsher if (nr_txbds > tx_queue->num_txbdfree) { 1798ec21e2ecSJeff Kirsher /* no space, stop the queue */ 1799ec21e2ecSJeff Kirsher netif_tx_stop_queue(txq); 1800ec21e2ecSJeff Kirsher dev->stats.tx_fifo_errors++; 1801ec21e2ecSJeff Kirsher return NETDEV_TX_BUSY; 1802ec21e2ecSJeff Kirsher } 1803ec21e2ecSJeff Kirsher 1804ec21e2ecSJeff Kirsher /* Update transmit stats */ 180550ad076bSClaudiu Manoil bytes_sent = skb->len; 180650ad076bSClaudiu Manoil tx_queue->stats.tx_bytes += bytes_sent; 180750ad076bSClaudiu Manoil /* keep Tx bytes on wire for BQL accounting */ 180850ad076bSClaudiu Manoil GFAR_CB(skb)->bytes_sent = bytes_sent; 1809ec21e2ecSJeff Kirsher tx_queue->stats.tx_packets++; 1810ec21e2ecSJeff Kirsher 1811ec21e2ecSJeff Kirsher txbdp = txbdp_start = tx_queue->cur_tx; 1812a7312d58SClaudiu Manoil lstatus = be32_to_cpu(txbdp->lstatus); 1813ec21e2ecSJeff Kirsher 18149c4886e5SManfred Rudigier /* Add TxPAL between FCB and frame if required */ 18159c4886e5SManfred Rudigier if (unlikely(do_tstamp)) { 18169c4886e5SManfred Rudigier skb_push(skb, GMAC_TXPAL_LEN); 18179c4886e5SManfred Rudigier memset(skb->data, 0, GMAC_TXPAL_LEN); 18189c4886e5SManfred Rudigier } 18199c4886e5SManfred Rudigier 18200d0cffdcSClaudiu Manoil /* Add TxFCB if required */ 18210d0cffdcSClaudiu Manoil if (fcb_len) { 1822ec21e2ecSJeff Kirsher fcb = gfar_add_fcb(skb); 1823ec21e2ecSJeff Kirsher lstatus |= BD_LFLAG(TXBD_TOE); 18240d0cffdcSClaudiu Manoil } 18250d0cffdcSClaudiu Manoil 18260d0cffdcSClaudiu Manoil /* Set up checksumming */ 18270d0cffdcSClaudiu Manoil if (do_csum) { 18280d0cffdcSClaudiu Manoil gfar_tx_checksum(skb, fcb, fcb_len); 182902d88fb4SClaudiu Manoil 183002d88fb4SClaudiu Manoil if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || 183102d88fb4SClaudiu Manoil unlikely(gfar_csum_errata_76(priv, skb->len))) { 183202d88fb4SClaudiu Manoil __skb_pull(skb, GMAC_FCB_LEN); 183302d88fb4SClaudiu Manoil skb_checksum_help(skb); 18340d0cffdcSClaudiu Manoil if (do_vlan || do_tstamp) { 18350d0cffdcSClaudiu Manoil /* put back a new fcb for vlan/tstamp TOE */ 18360d0cffdcSClaudiu Manoil fcb = gfar_add_fcb(skb); 18370d0cffdcSClaudiu Manoil } else { 18380d0cffdcSClaudiu Manoil /* Tx TOE not used */ 183902d88fb4SClaudiu Manoil lstatus &= ~(BD_LFLAG(TXBD_TOE)); 184002d88fb4SClaudiu Manoil fcb = NULL; 1841ec21e2ecSJeff Kirsher } 1842ec21e2ecSJeff Kirsher } 1843ec21e2ecSJeff Kirsher } 1844ec21e2ecSJeff Kirsher 18450d0cffdcSClaudiu Manoil if (do_vlan) 1846ec21e2ecSJeff Kirsher gfar_tx_vlan(skb, fcb); 1847ec21e2ecSJeff Kirsher 18480a4b5a24SKevin Hao bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), 18490a4b5a24SKevin Hao DMA_TO_DEVICE); 18500a4b5a24SKevin Hao if (unlikely(dma_mapping_error(priv->dev, bufaddr))) 18510a4b5a24SKevin Hao goto dma_map_err; 18520a4b5a24SKevin Hao 1853a7312d58SClaudiu Manoil 
txbdp_start->bufPtr = cpu_to_be32(bufaddr); 1854ec21e2ecSJeff Kirsher 1855e19d0839SClaudiu Manoil /* Time stamp insertion requires one additional TxBD */ 1856e19d0839SClaudiu Manoil if (unlikely(do_tstamp)) 1857e19d0839SClaudiu Manoil txbdp_tstamp = txbdp = next_txbd(txbdp, base, 1858e19d0839SClaudiu Manoil tx_queue->tx_ring_size); 1859e19d0839SClaudiu Manoil 186048963b44SClaudiu Manoil if (likely(!nr_frags)) { 18619c8b0778SYangbo Lu if (likely(!do_tstamp)) 1862e19d0839SClaudiu Manoil lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1863e19d0839SClaudiu Manoil } else { 1864e19d0839SClaudiu Manoil u32 lstatus_start = lstatus; 1865e19d0839SClaudiu Manoil 1866e19d0839SClaudiu Manoil /* Place the fragment addresses and lengths into the TxBDs */ 186742f397adSClaudiu Manoil frag = &skb_shinfo(skb)->frags[0]; 186842f397adSClaudiu Manoil for (i = 0; i < nr_frags; i++, frag++) { 186942f397adSClaudiu Manoil unsigned int size; 187042f397adSClaudiu Manoil 1871e19d0839SClaudiu Manoil /* Point at the next BD, wrapping as needed */ 1872e19d0839SClaudiu Manoil txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 1873e19d0839SClaudiu Manoil 187442f397adSClaudiu Manoil size = skb_frag_size(frag); 1875e19d0839SClaudiu Manoil 187642f397adSClaudiu Manoil lstatus = be32_to_cpu(txbdp->lstatus) | size | 1877e19d0839SClaudiu Manoil BD_LFLAG(TXBD_READY); 1878e19d0839SClaudiu Manoil 1879e19d0839SClaudiu Manoil /* Handle the last BD specially */ 1880e19d0839SClaudiu Manoil if (i == nr_frags - 1) 1881e19d0839SClaudiu Manoil lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1882e19d0839SClaudiu Manoil 188342f397adSClaudiu Manoil bufaddr = skb_frag_dma_map(priv->dev, frag, 0, 188442f397adSClaudiu Manoil size, DMA_TO_DEVICE); 1885e19d0839SClaudiu Manoil if (unlikely(dma_mapping_error(priv->dev, bufaddr))) 1886e19d0839SClaudiu Manoil goto dma_map_err; 1887e19d0839SClaudiu Manoil 1888e19d0839SClaudiu Manoil /* set the TxBD length and buffer pointer */ 1889e19d0839SClaudiu Manoil txbdp->bufPtr = cpu_to_be32(bufaddr); 1890e19d0839SClaudiu Manoil txbdp->lstatus = cpu_to_be32(lstatus); 1891e19d0839SClaudiu Manoil } 1892e19d0839SClaudiu Manoil 1893e19d0839SClaudiu Manoil lstatus = lstatus_start; 1894e19d0839SClaudiu Manoil } 1895e19d0839SClaudiu Manoil 18960977f817SJan Ceuleers /* If time stamping is requested one additional TxBD must be set up. The 1897ec21e2ecSJeff Kirsher * first TxBD points to the FCB and must have a data length of 1898ec21e2ecSJeff Kirsher * GMAC_FCB_LEN. The second TxBD points to the actual frame data with 1899ec21e2ecSJeff Kirsher * the full frame length. 
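 * As an illustrative example: with fcb_len == GMAC_FCB_LEN +
 * GMAC_TXPAL_LEN, the first TxBD describes just the FCB (length
 * GMAC_FCB_LEN), while the timestamp TxBD is pointed at
 * bufPtr + fcb_len and carries the remaining skb_headlen() - fcb_len
 * bytes of the linear buffer.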
1900ec21e2ecSJeff Kirsher */ 1901ec21e2ecSJeff Kirsher if (unlikely(do_tstamp)) { 1902a7312d58SClaudiu Manoil u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); 1903a7312d58SClaudiu Manoil 1904a7312d58SClaudiu Manoil bufaddr = be32_to_cpu(txbdp_start->bufPtr); 1905a7312d58SClaudiu Manoil bufaddr += fcb_len; 190648963b44SClaudiu Manoil 1907a7312d58SClaudiu Manoil lstatus_ts |= BD_LFLAG(TXBD_READY) | 19080d0cffdcSClaudiu Manoil (skb_headlen(skb) - fcb_len); 190948963b44SClaudiu Manoil if (!nr_frags) 191048963b44SClaudiu Manoil lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1911a7312d58SClaudiu Manoil 1912a7312d58SClaudiu Manoil txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); 1913a7312d58SClaudiu Manoil txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); 1914ec21e2ecSJeff Kirsher lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; 1915e19d0839SClaudiu Manoil 1916e19d0839SClaudiu Manoil /* Setup tx hardware time stamping */ 1917e19d0839SClaudiu Manoil skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1918e19d0839SClaudiu Manoil fcb->ptp = 1; 1919ec21e2ecSJeff Kirsher } else { 1920ec21e2ecSJeff Kirsher lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 1921ec21e2ecSJeff Kirsher } 1922ec21e2ecSJeff Kirsher 192350ad076bSClaudiu Manoil netdev_tx_sent_queue(txq, bytes_sent); 1924d8a0f1b0SPaul Gortmaker 1925d55398baSClaudiu Manoil gfar_wmb(); 1926ec21e2ecSJeff Kirsher 1927a7312d58SClaudiu Manoil txbdp_start->lstatus = cpu_to_be32(lstatus); 1928ec21e2ecSJeff Kirsher 1929d55398baSClaudiu Manoil gfar_wmb(); /* force lstatus write before tx_skbuff */ 1930ec21e2ecSJeff Kirsher 1931ec21e2ecSJeff Kirsher tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; 1932ec21e2ecSJeff Kirsher 1933ec21e2ecSJeff Kirsher /* Update the current skb pointer to the next entry we will use 19340977f817SJan Ceuleers * (wrapping if necessary) 19350977f817SJan Ceuleers */ 1936ec21e2ecSJeff Kirsher tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & 1937ec21e2ecSJeff Kirsher TX_RING_MOD_MASK(tx_queue->tx_ring_size); 1938ec21e2ecSJeff Kirsher 1939ec21e2ecSJeff Kirsher tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 1940ec21e2ecSJeff Kirsher 1941bc602280SClaudiu Manoil /* We can work in parallel with gfar_clean_tx_ring(), except 1942bc602280SClaudiu Manoil * when modifying num_txbdfree. Note that we didn't grab the lock 1943bc602280SClaudiu Manoil * when we were reading the num_txbdfree and checking for available 1944bc602280SClaudiu Manoil * space, that's because outside of this function it can only grow. 1945bc602280SClaudiu Manoil */ 1946bc602280SClaudiu Manoil spin_lock_bh(&tx_queue->txlock); 1947ec21e2ecSJeff Kirsher /* reduce TxBD free count */ 1948ec21e2ecSJeff Kirsher tx_queue->num_txbdfree -= (nr_txbds); 1949bc602280SClaudiu Manoil spin_unlock_bh(&tx_queue->txlock); 1950ec21e2ecSJeff Kirsher 1951ec21e2ecSJeff Kirsher /* If the next BD still needs to be cleaned up, then the bds 19520977f817SJan Ceuleers * are full. We need to tell the kernel to stop sending us stuff. 
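 * (gfar_clean_tx_ring() wakes the queue again once descriptors have
 * been reclaimed.)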
19530977f817SJan Ceuleers */
1954ec21e2ecSJeff Kirsher if (!tx_queue->num_txbdfree) {
1955ec21e2ecSJeff Kirsher netif_tx_stop_queue(txq);
1956ec21e2ecSJeff Kirsher 
1957ec21e2ecSJeff Kirsher dev->stats.tx_fifo_errors++;
1958ec21e2ecSJeff Kirsher }
1959ec21e2ecSJeff Kirsher 
1960ec21e2ecSJeff Kirsher /* Tell the DMA to go go go */
1961ec21e2ecSJeff Kirsher gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1962ec21e2ecSJeff Kirsher 
1963ec21e2ecSJeff Kirsher return NETDEV_TX_OK;
19640a4b5a24SKevin Hao 
19650a4b5a24SKevin Hao dma_map_err:
19660a4b5a24SKevin Hao txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
19670a4b5a24SKevin Hao if (do_tstamp)
19680a4b5a24SKevin Hao txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
19690a4b5a24SKevin Hao for (i = 0; i < nr_frags; i++) {
1970a7312d58SClaudiu Manoil lstatus = be32_to_cpu(txbdp->lstatus);
19710a4b5a24SKevin Hao if (!(lstatus & BD_LFLAG(TXBD_READY)))
19720a4b5a24SKevin Hao break;
19730a4b5a24SKevin Hao 
1974a7312d58SClaudiu Manoil lstatus &= ~BD_LFLAG(TXBD_READY);
1975a7312d58SClaudiu Manoil txbdp->lstatus = cpu_to_be32(lstatus);
1976a7312d58SClaudiu Manoil bufaddr = be32_to_cpu(txbdp->bufPtr);
1977a7312d58SClaudiu Manoil dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
19780a4b5a24SKevin Hao DMA_TO_DEVICE);
19790a4b5a24SKevin Hao txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
19800a4b5a24SKevin Hao }
19810a4b5a24SKevin Hao gfar_wmb();
19820a4b5a24SKevin Hao dev_kfree_skb_any(skb);
19830a4b5a24SKevin Hao return NETDEV_TX_OK;
1984ec21e2ecSJeff Kirsher }
1985ec21e2ecSJeff Kirsher 
1986ec21e2ecSJeff Kirsher /* Changes the mac address if the controller is not running. */
1987ec21e2ecSJeff Kirsher static int gfar_set_mac_address(struct net_device *dev)
1988ec21e2ecSJeff Kirsher {
1989ec21e2ecSJeff Kirsher gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1990ec21e2ecSJeff Kirsher 
1991ec21e2ecSJeff Kirsher return 0;
1992ec21e2ecSJeff Kirsher }
1993ec21e2ecSJeff Kirsher 
1994ec21e2ecSJeff Kirsher static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1995ec21e2ecSJeff Kirsher {
1996ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
1997ec21e2ecSJeff Kirsher 
19980851133bSClaudiu Manoil while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
19990851133bSClaudiu Manoil cpu_relax();
20000851133bSClaudiu Manoil 
200188302648SClaudiu Manoil if (dev->flags & IFF_UP)
2002ec21e2ecSJeff Kirsher stop_gfar(dev);
2003ec21e2ecSJeff Kirsher 
2004ec21e2ecSJeff Kirsher dev->mtu = new_mtu;
2005ec21e2ecSJeff Kirsher 
200688302648SClaudiu Manoil if (dev->flags & IFF_UP)
2007ec21e2ecSJeff Kirsher startup_gfar(dev);
2008ec21e2ecSJeff Kirsher 
20090851133bSClaudiu Manoil clear_bit_unlock(GFAR_RESETTING, &priv->state);
20100851133bSClaudiu Manoil 
2011ec21e2ecSJeff Kirsher return 0;
2012ec21e2ecSJeff Kirsher }
2013ec21e2ecSJeff Kirsher 
20149f5c44cfSYueHaibing static void reset_gfar(struct net_device *ndev)
20150851133bSClaudiu Manoil {
20160851133bSClaudiu Manoil struct gfar_private *priv = netdev_priv(ndev);
20170851133bSClaudiu Manoil 
20180851133bSClaudiu Manoil while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20190851133bSClaudiu Manoil cpu_relax();
20200851133bSClaudiu Manoil 
20210851133bSClaudiu Manoil stop_gfar(ndev);
20220851133bSClaudiu Manoil startup_gfar(ndev);
20230851133bSClaudiu Manoil 
20240851133bSClaudiu Manoil clear_bit_unlock(GFAR_RESETTING, &priv->state);
20250851133bSClaudiu Manoil }
20260851133bSClaudiu Manoil 
2027ec21e2ecSJeff Kirsher /* gfar_reset_task gets scheduled when a
packet has not been 2028ec21e2ecSJeff Kirsher * transmitted after a set amount of time. 2029ec21e2ecSJeff Kirsher * For now, assume that clearing out all the structures, and 2030ec21e2ecSJeff Kirsher * starting over will fix the problem. 2031ec21e2ecSJeff Kirsher */ 2032ec21e2ecSJeff Kirsher static void gfar_reset_task(struct work_struct *work) 2033ec21e2ecSJeff Kirsher { 2034ec21e2ecSJeff Kirsher struct gfar_private *priv = container_of(work, struct gfar_private, 2035ec21e2ecSJeff Kirsher reset_task); 20360851133bSClaudiu Manoil reset_gfar(priv->ndev); 2037ec21e2ecSJeff Kirsher } 2038ec21e2ecSJeff Kirsher 20390290bd29SMichael S. Tsirkin static void gfar_timeout(struct net_device *dev, unsigned int txqueue) 2040ec21e2ecSJeff Kirsher { 2041ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev); 2042ec21e2ecSJeff Kirsher 2043ec21e2ecSJeff Kirsher dev->stats.tx_errors++; 2044ec21e2ecSJeff Kirsher schedule_work(&priv->reset_task); 2045ec21e2ecSJeff Kirsher } 2046ec21e2ecSJeff Kirsher 20477d993c5fSArseny Solokha static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 20487d993c5fSArseny Solokha { 20497d993c5fSArseny Solokha struct hwtstamp_config config; 20507d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(netdev); 20517d993c5fSArseny Solokha 20527d993c5fSArseny Solokha if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 20537d993c5fSArseny Solokha return -EFAULT; 20547d993c5fSArseny Solokha 20557d993c5fSArseny Solokha /* reserved for future extensions */ 20567d993c5fSArseny Solokha if (config.flags) 20577d993c5fSArseny Solokha return -EINVAL; 20587d993c5fSArseny Solokha 20597d993c5fSArseny Solokha switch (config.tx_type) { 20607d993c5fSArseny Solokha case HWTSTAMP_TX_OFF: 20617d993c5fSArseny Solokha priv->hwts_tx_en = 0; 20627d993c5fSArseny Solokha break; 20637d993c5fSArseny Solokha case HWTSTAMP_TX_ON: 20647d993c5fSArseny Solokha if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 20657d993c5fSArseny Solokha return -ERANGE; 20667d993c5fSArseny Solokha priv->hwts_tx_en = 1; 20677d993c5fSArseny Solokha break; 20687d993c5fSArseny Solokha default: 20697d993c5fSArseny Solokha return -ERANGE; 20707d993c5fSArseny Solokha } 20717d993c5fSArseny Solokha 20727d993c5fSArseny Solokha switch (config.rx_filter) { 20737d993c5fSArseny Solokha case HWTSTAMP_FILTER_NONE: 20747d993c5fSArseny Solokha if (priv->hwts_rx_en) { 20757d993c5fSArseny Solokha priv->hwts_rx_en = 0; 20767d993c5fSArseny Solokha reset_gfar(netdev); 20777d993c5fSArseny Solokha } 20787d993c5fSArseny Solokha break; 20797d993c5fSArseny Solokha default: 20807d993c5fSArseny Solokha if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 20817d993c5fSArseny Solokha return -ERANGE; 20827d993c5fSArseny Solokha if (!priv->hwts_rx_en) { 20837d993c5fSArseny Solokha priv->hwts_rx_en = 1; 20847d993c5fSArseny Solokha reset_gfar(netdev); 20857d993c5fSArseny Solokha } 20867d993c5fSArseny Solokha config.rx_filter = HWTSTAMP_FILTER_ALL; 20877d993c5fSArseny Solokha break; 20887d993c5fSArseny Solokha } 20897d993c5fSArseny Solokha 20907d993c5fSArseny Solokha return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
20917d993c5fSArseny Solokha -EFAULT : 0; 20927d993c5fSArseny Solokha } 20937d993c5fSArseny Solokha 20947d993c5fSArseny Solokha static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) 20957d993c5fSArseny Solokha { 20967d993c5fSArseny Solokha struct hwtstamp_config config; 20977d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(netdev); 20987d993c5fSArseny Solokha 20997d993c5fSArseny Solokha config.flags = 0; 21007d993c5fSArseny Solokha config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; 21017d993c5fSArseny Solokha config.rx_filter = (priv->hwts_rx_en ? 21027d993c5fSArseny Solokha HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); 21037d993c5fSArseny Solokha 21047d993c5fSArseny Solokha return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 21057d993c5fSArseny Solokha -EFAULT : 0; 21067d993c5fSArseny Solokha } 21077d993c5fSArseny Solokha 21087d993c5fSArseny Solokha static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 21097d993c5fSArseny Solokha { 21107d993c5fSArseny Solokha struct phy_device *phydev = dev->phydev; 21117d993c5fSArseny Solokha 21127d993c5fSArseny Solokha if (!netif_running(dev)) 21137d993c5fSArseny Solokha return -EINVAL; 21147d993c5fSArseny Solokha 21157d993c5fSArseny Solokha if (cmd == SIOCSHWTSTAMP) 21167d993c5fSArseny Solokha return gfar_hwtstamp_set(dev, rq); 21177d993c5fSArseny Solokha if (cmd == SIOCGHWTSTAMP) 21187d993c5fSArseny Solokha return gfar_hwtstamp_get(dev, rq); 21197d993c5fSArseny Solokha 21207d993c5fSArseny Solokha if (!phydev) 21217d993c5fSArseny Solokha return -ENODEV; 21227d993c5fSArseny Solokha 21237d993c5fSArseny Solokha return phy_mii_ioctl(phydev, rq, cmd); 21247d993c5fSArseny Solokha } 21257d993c5fSArseny Solokha 2126ec21e2ecSJeff Kirsher /* Interrupt Handler for Transmit complete */ 2127c233cf40SClaudiu Manoil static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2128ec21e2ecSJeff Kirsher { 2129ec21e2ecSJeff Kirsher struct net_device *dev = tx_queue->dev; 2130d8a0f1b0SPaul Gortmaker struct netdev_queue *txq; 2131ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev); 2132ec21e2ecSJeff Kirsher struct txbd8 *bdp, *next = NULL; 2133ec21e2ecSJeff Kirsher struct txbd8 *lbdp = NULL; 2134ec21e2ecSJeff Kirsher struct txbd8 *base = tx_queue->tx_bd_base; 2135ec21e2ecSJeff Kirsher struct sk_buff *skb; 2136ec21e2ecSJeff Kirsher int skb_dirtytx; 2137ec21e2ecSJeff Kirsher int tx_ring_size = tx_queue->tx_ring_size; 2138ec21e2ecSJeff Kirsher int frags = 0, nr_txbds = 0; 2139ec21e2ecSJeff Kirsher int i; 2140ec21e2ecSJeff Kirsher int howmany = 0; 2141d8a0f1b0SPaul Gortmaker int tqi = tx_queue->qindex; 2142d8a0f1b0SPaul Gortmaker unsigned int bytes_sent = 0; 2143ec21e2ecSJeff Kirsher u32 lstatus; 2144ec21e2ecSJeff Kirsher size_t buflen; 2145ec21e2ecSJeff Kirsher 2146d8a0f1b0SPaul Gortmaker txq = netdev_get_tx_queue(dev, tqi); 2147ec21e2ecSJeff Kirsher bdp = tx_queue->dirty_tx; 2148ec21e2ecSJeff Kirsher skb_dirtytx = tx_queue->skb_dirtytx; 2149ec21e2ecSJeff Kirsher 2150ec21e2ecSJeff Kirsher while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { 2151c26a2c2dSVladimir Oltean bool do_tstamp; 2152c26a2c2dSVladimir Oltean 2153c26a2c2dSVladimir Oltean do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2154c26a2c2dSVladimir Oltean priv->hwts_tx_en; 2155ec21e2ecSJeff Kirsher 2156ec21e2ecSJeff Kirsher frags = skb_shinfo(skb)->nr_frags; 2157ec21e2ecSJeff Kirsher 21580977f817SJan Ceuleers /* When time stamping, one additional TxBD must be freed. 
2159ec21e2ecSJeff Kirsher * Also, we need to dma_unmap_single() the TxPAL. 2160ec21e2ecSJeff Kirsher */ 2161c26a2c2dSVladimir Oltean if (unlikely(do_tstamp)) 2162ec21e2ecSJeff Kirsher nr_txbds = frags + 2; 2163ec21e2ecSJeff Kirsher else 2164ec21e2ecSJeff Kirsher nr_txbds = frags + 1; 2165ec21e2ecSJeff Kirsher 2166ec21e2ecSJeff Kirsher lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); 2167ec21e2ecSJeff Kirsher 2168a7312d58SClaudiu Manoil lstatus = be32_to_cpu(lbdp->lstatus); 2169ec21e2ecSJeff Kirsher 2170ec21e2ecSJeff Kirsher /* Only clean completed frames */ 2171ec21e2ecSJeff Kirsher if ((lstatus & BD_LFLAG(TXBD_READY)) && 2172ec21e2ecSJeff Kirsher (lstatus & BD_LENGTH_MASK)) 2173ec21e2ecSJeff Kirsher break; 2174ec21e2ecSJeff Kirsher 2175c26a2c2dSVladimir Oltean if (unlikely(do_tstamp)) { 2176ec21e2ecSJeff Kirsher next = next_txbd(bdp, base, tx_ring_size); 2177a7312d58SClaudiu Manoil buflen = be16_to_cpu(next->length) + 2178a7312d58SClaudiu Manoil GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2179ec21e2ecSJeff Kirsher } else 2180a7312d58SClaudiu Manoil buflen = be16_to_cpu(bdp->length); 2181ec21e2ecSJeff Kirsher 2182a7312d58SClaudiu Manoil dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), 2183ec21e2ecSJeff Kirsher buflen, DMA_TO_DEVICE); 2184ec21e2ecSJeff Kirsher 2185c26a2c2dSVladimir Oltean if (unlikely(do_tstamp)) { 2186ec21e2ecSJeff Kirsher struct skb_shared_hwtstamps shhwtstamps; 2187b4b67f26SScott Wood u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & 2188b4b67f26SScott Wood ~0x7UL); 2189bc4598bcSJan Ceuleers 2190ec21e2ecSJeff Kirsher memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2191f54af12fSYangbo Lu shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); 21929c4886e5SManfred Rudigier skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); 2193ec21e2ecSJeff Kirsher skb_tstamp_tx(skb, &shhwtstamps); 2194a7312d58SClaudiu Manoil gfar_clear_txbd_status(bdp); 2195ec21e2ecSJeff Kirsher bdp = next; 2196ec21e2ecSJeff Kirsher } 2197ec21e2ecSJeff Kirsher 2198a7312d58SClaudiu Manoil gfar_clear_txbd_status(bdp); 2199ec21e2ecSJeff Kirsher bdp = next_txbd(bdp, base, tx_ring_size); 2200ec21e2ecSJeff Kirsher 2201ec21e2ecSJeff Kirsher for (i = 0; i < frags; i++) { 2202a7312d58SClaudiu Manoil dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), 2203a7312d58SClaudiu Manoil be16_to_cpu(bdp->length), 2204a7312d58SClaudiu Manoil DMA_TO_DEVICE); 2205a7312d58SClaudiu Manoil gfar_clear_txbd_status(bdp); 2206ec21e2ecSJeff Kirsher bdp = next_txbd(bdp, base, tx_ring_size); 2207ec21e2ecSJeff Kirsher } 2208ec21e2ecSJeff Kirsher 220950ad076bSClaudiu Manoil bytes_sent += GFAR_CB(skb)->bytes_sent; 2210d8a0f1b0SPaul Gortmaker 2211ec21e2ecSJeff Kirsher dev_kfree_skb_any(skb); 2212ec21e2ecSJeff Kirsher 2213ec21e2ecSJeff Kirsher tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2214ec21e2ecSJeff Kirsher 2215ec21e2ecSJeff Kirsher skb_dirtytx = (skb_dirtytx + 1) & 2216ec21e2ecSJeff Kirsher TX_RING_MOD_MASK(tx_ring_size); 2217ec21e2ecSJeff Kirsher 2218ec21e2ecSJeff Kirsher howmany++; 2219bc602280SClaudiu Manoil spin_lock(&tx_queue->txlock); 2220ec21e2ecSJeff Kirsher tx_queue->num_txbdfree += nr_txbds; 2221bc602280SClaudiu Manoil spin_unlock(&tx_queue->txlock); 2222ec21e2ecSJeff Kirsher } 2223ec21e2ecSJeff Kirsher 2224ec21e2ecSJeff Kirsher /* If we freed a buffer, we can restart transmission, if necessary */ 22250851133bSClaudiu Manoil if (tx_queue->num_txbdfree && 22260851133bSClaudiu Manoil netif_tx_queue_stopped(txq) && 22270851133bSClaudiu Manoil !(test_bit(GFAR_DOWN, &priv->state))) 22280851133bSClaudiu Manoil netif_wake_subqueue(priv->ndev, 
tqi); 2229ec21e2ecSJeff Kirsher 2230ec21e2ecSJeff Kirsher /* Update dirty indicators */ 2231ec21e2ecSJeff Kirsher tx_queue->skb_dirtytx = skb_dirtytx; 2232ec21e2ecSJeff Kirsher tx_queue->dirty_tx = bdp; 2233ec21e2ecSJeff Kirsher 2234d8a0f1b0SPaul Gortmaker netdev_tx_completed_queue(txq, howmany, bytes_sent); 2235ec21e2ecSJeff Kirsher } 2236ec21e2ecSJeff Kirsher 2237f23223f1SClaudiu Manoil static void count_errors(u32 lstatus, struct net_device *ndev) 2238ec21e2ecSJeff Kirsher { 2239f23223f1SClaudiu Manoil struct gfar_private *priv = netdev_priv(ndev); 2240f23223f1SClaudiu Manoil struct net_device_stats *stats = &ndev->stats; 2241ec21e2ecSJeff Kirsher struct gfar_extra_stats *estats = &priv->extra_stats; 2242ec21e2ecSJeff Kirsher 22430977f817SJan Ceuleers /* If the packet was truncated, none of the other errors matter */ 2244f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { 2245ec21e2ecSJeff Kirsher stats->rx_length_errors++; 2246ec21e2ecSJeff Kirsher 2247212079dfSPaul Gortmaker atomic64_inc(&estats->rx_trunc); 2248ec21e2ecSJeff Kirsher 2249ec21e2ecSJeff Kirsher return; 2250ec21e2ecSJeff Kirsher } 2251ec21e2ecSJeff Kirsher /* Count the errors, if there were any */ 2252f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { 2253ec21e2ecSJeff Kirsher stats->rx_length_errors++; 2254ec21e2ecSJeff Kirsher 2255f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_LARGE)) 2256212079dfSPaul Gortmaker atomic64_inc(&estats->rx_large); 2257ec21e2ecSJeff Kirsher else 2258212079dfSPaul Gortmaker atomic64_inc(&estats->rx_short); 2259ec21e2ecSJeff Kirsher } 2260f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { 2261ec21e2ecSJeff Kirsher stats->rx_frame_errors++; 2262212079dfSPaul Gortmaker atomic64_inc(&estats->rx_nonoctet); 2263ec21e2ecSJeff Kirsher } 2264f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_CRCERR)) { 2265212079dfSPaul Gortmaker atomic64_inc(&estats->rx_crcerr); 2266ec21e2ecSJeff Kirsher stats->rx_crc_errors++; 2267ec21e2ecSJeff Kirsher } 2268f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { 2269212079dfSPaul Gortmaker atomic64_inc(&estats->rx_overrun); 2270f966082eSClaudiu Manoil stats->rx_over_errors++; 2271ec21e2ecSJeff Kirsher } 2272ec21e2ecSJeff Kirsher } 2273ec21e2ecSJeff Kirsher 22747ad38784SArseny Solokha static irqreturn_t gfar_receive(int irq, void *grp_id) 2275ec21e2ecSJeff Kirsher { 2276aeb12c5eSClaudiu Manoil struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; 2277aeb12c5eSClaudiu Manoil unsigned long flags; 22783e905b80SClaudiu Manoil u32 imask, ievent; 22793e905b80SClaudiu Manoil 22803e905b80SClaudiu Manoil ievent = gfar_read(&grp->regs->ievent); 22813e905b80SClaudiu Manoil 22823e905b80SClaudiu Manoil if (unlikely(ievent & IEVENT_FGPI)) { 22833e905b80SClaudiu Manoil gfar_write(&grp->regs->ievent, IEVENT_FGPI); 22843e905b80SClaudiu Manoil return IRQ_HANDLED; 22853e905b80SClaudiu Manoil } 2286aeb12c5eSClaudiu Manoil 2287aeb12c5eSClaudiu Manoil if (likely(napi_schedule_prep(&grp->napi_rx))) { 2288aeb12c5eSClaudiu Manoil spin_lock_irqsave(&grp->grplock, flags); 2289aeb12c5eSClaudiu Manoil imask = gfar_read(&grp->regs->imask); 2290aeb12c5eSClaudiu Manoil imask &= IMASK_RX_DISABLED; 2291aeb12c5eSClaudiu Manoil gfar_write(&grp->regs->imask, imask); 2292aeb12c5eSClaudiu Manoil spin_unlock_irqrestore(&grp->grplock, flags); 2293aeb12c5eSClaudiu Manoil __napi_schedule(&grp->napi_rx); 2294aeb12c5eSClaudiu Manoil } else { 2295aeb12c5eSClaudiu Manoil /* Clear IEVENT, so interrupts aren't called again 
2296aeb12c5eSClaudiu Manoil * because of the packets that have already arrived.
2297aeb12c5eSClaudiu Manoil */
2298aeb12c5eSClaudiu Manoil gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2299aeb12c5eSClaudiu Manoil }
2300aeb12c5eSClaudiu Manoil
2301aeb12c5eSClaudiu Manoil return IRQ_HANDLED;
2302aeb12c5eSClaudiu Manoil }
2303aeb12c5eSClaudiu Manoil
2304aeb12c5eSClaudiu Manoil /* Interrupt Handler for Transmit complete */
2305aeb12c5eSClaudiu Manoil static irqreturn_t gfar_transmit(int irq, void *grp_id)
2306aeb12c5eSClaudiu Manoil {
2307aeb12c5eSClaudiu Manoil struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2308aeb12c5eSClaudiu Manoil unsigned long flags;
2309aeb12c5eSClaudiu Manoil u32 imask;
2310aeb12c5eSClaudiu Manoil
2311aeb12c5eSClaudiu Manoil if (likely(napi_schedule_prep(&grp->napi_tx))) {
2312aeb12c5eSClaudiu Manoil spin_lock_irqsave(&grp->grplock, flags);
2313aeb12c5eSClaudiu Manoil imask = gfar_read(&grp->regs->imask);
2314aeb12c5eSClaudiu Manoil imask &= IMASK_TX_DISABLED;
2315aeb12c5eSClaudiu Manoil gfar_write(&grp->regs->imask, imask);
2316aeb12c5eSClaudiu Manoil spin_unlock_irqrestore(&grp->grplock, flags);
2317aeb12c5eSClaudiu Manoil __napi_schedule(&grp->napi_tx);
2318aeb12c5eSClaudiu Manoil } else {
2319aeb12c5eSClaudiu Manoil /* Clear IEVENT, so interrupts aren't called again
2320aeb12c5eSClaudiu Manoil * because of the packets that have already arrived.
2321aeb12c5eSClaudiu Manoil */
2322aeb12c5eSClaudiu Manoil gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2323aeb12c5eSClaudiu Manoil }
2324aeb12c5eSClaudiu Manoil
2325ec21e2ecSJeff Kirsher return IRQ_HANDLED;
2326ec21e2ecSJeff Kirsher }
2327ec21e2ecSJeff Kirsher
232875354148SClaudiu Manoil static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
232975354148SClaudiu Manoil struct sk_buff *skb, bool first)
233075354148SClaudiu Manoil {
2331202a0a70SAndy Spencer int size = lstatus & BD_LENGTH_MASK;
233275354148SClaudiu Manoil struct page *page = rxb->page;
233375354148SClaudiu Manoil
23346c389fc9SZefir Kurtisi if (likely(first)) {
233575354148SClaudiu Manoil skb_put(skb, size);
23366c389fc9SZefir Kurtisi } else {
23376c389fc9SZefir Kurtisi /* the last fragment's length contains the full frame length */
2338d903ec77SAndy Spencer if (lstatus & BD_LFLAG(RXBD_LAST))
23396c389fc9SZefir Kurtisi size -= skb->len;
23406c389fc9SZefir Kurtisi
2341d8861babSMichael Braun WARN(size < 0, "gianfar: rx fragment size underflow");
2342d8861babSMichael Braun if (size < 0)
2343d8861babSMichael Braun return false;
2344d8861babSMichael Braun
234575354148SClaudiu Manoil skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
234675354148SClaudiu Manoil rxb->page_offset + RXBUF_ALIGNMENT,
234775354148SClaudiu Manoil size, GFAR_RXB_TRUESIZE);
23486c389fc9SZefir Kurtisi }
234975354148SClaudiu Manoil
235075354148SClaudiu Manoil /* try reuse page */
235175354148SClaudiu Manoil if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
235275354148SClaudiu Manoil return false;
235375354148SClaudiu Manoil
235475354148SClaudiu Manoil /* change offset to the other half */
235575354148SClaudiu Manoil rxb->page_offset ^= GFAR_RXB_TRUESIZE;
235675354148SClaudiu Manoil
2357fe896d18SJoonsoo Kim page_ref_inc(page);
235875354148SClaudiu Manoil
235975354148SClaudiu Manoil return true;
236075354148SClaudiu Manoil }
236175354148SClaudiu Manoil
236275354148SClaudiu Manoil static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
236375354148SClaudiu Manoil struct gfar_rx_buff *old_rxb)
236475354148SClaudiu Manoil {
236575354148SClaudiu
Manoil struct gfar_rx_buff *new_rxb; 236675354148SClaudiu Manoil u16 nta = rxq->next_to_alloc; 236775354148SClaudiu Manoil 236875354148SClaudiu Manoil new_rxb = &rxq->rx_buff[nta]; 236975354148SClaudiu Manoil 237075354148SClaudiu Manoil /* find next buf that can reuse a page */ 237175354148SClaudiu Manoil nta++; 237275354148SClaudiu Manoil rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; 237375354148SClaudiu Manoil 237475354148SClaudiu Manoil /* copy page reference */ 237575354148SClaudiu Manoil *new_rxb = *old_rxb; 237675354148SClaudiu Manoil 237775354148SClaudiu Manoil /* sync for use by the device */ 237875354148SClaudiu Manoil dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, 237975354148SClaudiu Manoil old_rxb->page_offset, 238075354148SClaudiu Manoil GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); 238175354148SClaudiu Manoil } 238275354148SClaudiu Manoil 238375354148SClaudiu Manoil static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, 238475354148SClaudiu Manoil u32 lstatus, struct sk_buff *skb) 238575354148SClaudiu Manoil { 238675354148SClaudiu Manoil struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; 238775354148SClaudiu Manoil struct page *page = rxb->page; 238875354148SClaudiu Manoil bool first = false; 238975354148SClaudiu Manoil 239075354148SClaudiu Manoil if (likely(!skb)) { 239175354148SClaudiu Manoil void *buff_addr = page_address(page) + rxb->page_offset; 239275354148SClaudiu Manoil 239375354148SClaudiu Manoil skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); 239475354148SClaudiu Manoil if (unlikely(!skb)) { 239575354148SClaudiu Manoil gfar_rx_alloc_err(rx_queue); 239675354148SClaudiu Manoil return NULL; 239775354148SClaudiu Manoil } 239875354148SClaudiu Manoil skb_reserve(skb, RXBUF_ALIGNMENT); 239975354148SClaudiu Manoil first = true; 240075354148SClaudiu Manoil } 240175354148SClaudiu Manoil 240275354148SClaudiu Manoil dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, 240375354148SClaudiu Manoil GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); 240475354148SClaudiu Manoil 240575354148SClaudiu Manoil if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { 240675354148SClaudiu Manoil /* reuse the free half of the page */ 240775354148SClaudiu Manoil gfar_reuse_rx_page(rx_queue, rxb); 240875354148SClaudiu Manoil } else { 240975354148SClaudiu Manoil /* page cannot be reused, unmap it */ 241075354148SClaudiu Manoil dma_unmap_page(rx_queue->dev, rxb->dma, 241175354148SClaudiu Manoil PAGE_SIZE, DMA_FROM_DEVICE); 241275354148SClaudiu Manoil } 241375354148SClaudiu Manoil 241475354148SClaudiu Manoil /* clear rxb content */ 241575354148SClaudiu Manoil rxb->page = NULL; 241675354148SClaudiu Manoil 241775354148SClaudiu Manoil return skb; 241875354148SClaudiu Manoil } 241975354148SClaudiu Manoil 2420ec21e2ecSJeff Kirsher static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 2421ec21e2ecSJeff Kirsher { 2422ec21e2ecSJeff Kirsher /* If valid headers were found, and valid sums 2423ec21e2ecSJeff Kirsher * were verified, then we tell the kernel that no 24240977f817SJan Ceuleers * checksumming is necessary. 
Otherwise, the skb is left as CHECKSUM_NONE and the stack must verify it.
24250977f817SJan Ceuleers */
242626eb9374SClaudiu Manoil if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
242726eb9374SClaudiu Manoil (RXFCB_CIP | RXFCB_CTU))
2428ec21e2ecSJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY;
2429ec21e2ecSJeff Kirsher else
2430ec21e2ecSJeff Kirsher skb_checksum_none_assert(skb);
2431ec21e2ecSJeff Kirsher }
2432ec21e2ecSJeff Kirsher
24330977f817SJan Ceuleers /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2434f23223f1SClaudiu Manoil static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2435ec21e2ecSJeff Kirsher {
2436f23223f1SClaudiu Manoil struct gfar_private *priv = netdev_priv(ndev);
2437ec21e2ecSJeff Kirsher struct rxfcb *fcb = NULL;
2438ec21e2ecSJeff Kirsher
2439ec21e2ecSJeff Kirsher /* the FCB, if it exists, sits at the beginning of the buffer */
2440ec21e2ecSJeff Kirsher fcb = (struct rxfcb *)skb->data;
2441ec21e2ecSJeff Kirsher
24420977f817SJan Ceuleers /* Remove the FCB from the skb
24430977f817SJan Ceuleers * Remove the padded bytes, if there are any
24440977f817SJan Ceuleers */
2445f23223f1SClaudiu Manoil if (priv->uses_rxfcb)
244676f31e8bSClaudiu Manoil skb_pull(skb, GMAC_FCB_LEN);
2447ec21e2ecSJeff Kirsher
2448ec21e2ecSJeff Kirsher /* Get receive timestamp from the skb */
2449ec21e2ecSJeff Kirsher if (priv->hwts_rx_en) {
2450ec21e2ecSJeff Kirsher struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2451ec21e2ecSJeff Kirsher u64 *ns = (u64 *) skb->data;
2452bc4598bcSJan Ceuleers
2453ec21e2ecSJeff Kirsher memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2454f54af12fSYangbo Lu shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2455ec21e2ecSJeff Kirsher }
2456ec21e2ecSJeff Kirsher
2457ec21e2ecSJeff Kirsher if (priv->padding)
2458ec21e2ecSJeff Kirsher skb_pull(skb, priv->padding);
2459ec21e2ecSJeff Kirsher
2460d903ec77SAndy Spencer /* Trim off the FCS */
2461d903ec77SAndy Spencer pskb_trim(skb, skb->len - ETH_FCS_LEN);
2462d903ec77SAndy Spencer
2463f23223f1SClaudiu Manoil if (ndev->features & NETIF_F_RXCSUM)
2464ec21e2ecSJeff Kirsher gfar_rx_checksum(skb, fcb);
2465ec21e2ecSJeff Kirsher
2466f646968fSPatrick McHardy /* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2467823dcd25SDavid S. Miller * Even if vlan rx accel is disabled, on some chips
2468823dcd25SDavid S. Miller * RXFCB_VLN is pseudo-randomly set.
2469823dcd25SDavid S. Miller */
2470f23223f1SClaudiu Manoil if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
247126eb9374SClaudiu Manoil be16_to_cpu(fcb->flags) & RXFCB_VLN)
247226eb9374SClaudiu Manoil __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
247326eb9374SClaudiu Manoil be16_to_cpu(fcb->vlctl));
2474ec21e2ecSJeff Kirsher }
2475ec21e2ecSJeff Kirsher
2476ec21e2ecSJeff Kirsher /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2477ec21e2ecSJeff Kirsher * until the budget/quota has been reached.
Returns the number 2478ec21e2ecSJeff Kirsher * of frames handled 2479ec21e2ecSJeff Kirsher */ 24807ad38784SArseny Solokha static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, 24817ad38784SArseny Solokha int rx_work_limit) 2482ec21e2ecSJeff Kirsher { 2483f23223f1SClaudiu Manoil struct net_device *ndev = rx_queue->ndev; 2484f23223f1SClaudiu Manoil struct gfar_private *priv = netdev_priv(ndev); 248575354148SClaudiu Manoil struct rxbd8 *bdp; 248675354148SClaudiu Manoil int i, howmany = 0; 248775354148SClaudiu Manoil struct sk_buff *skb = rx_queue->skb; 248875354148SClaudiu Manoil int cleaned_cnt = gfar_rxbd_unused(rx_queue); 248975354148SClaudiu Manoil unsigned int total_bytes = 0, total_pkts = 0; 2490ec21e2ecSJeff Kirsher 2491ec21e2ecSJeff Kirsher /* Get the first full descriptor */ 249276f31e8bSClaudiu Manoil i = rx_queue->next_to_clean; 2493ec21e2ecSJeff Kirsher 249476f31e8bSClaudiu Manoil while (rx_work_limit--) { 2495f966082eSClaudiu Manoil u32 lstatus; 2496ec21e2ecSJeff Kirsher 249776f31e8bSClaudiu Manoil if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { 249876f31e8bSClaudiu Manoil gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); 249976f31e8bSClaudiu Manoil cleaned_cnt = 0; 250076f31e8bSClaudiu Manoil } 2501bc4598bcSJan Ceuleers 250276f31e8bSClaudiu Manoil bdp = &rx_queue->rx_bd_base[i]; 2503f966082eSClaudiu Manoil lstatus = be32_to_cpu(bdp->lstatus); 2504f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_EMPTY)) 250576f31e8bSClaudiu Manoil break; 250676f31e8bSClaudiu Manoil 2507d8861babSMichael Braun /* lost RXBD_LAST descriptor due to overrun */ 2508d8861babSMichael Braun if (skb && 2509d8861babSMichael Braun (lstatus & BD_LFLAG(RXBD_FIRST))) { 2510d8861babSMichael Braun /* discard faulty buffer */ 2511d8861babSMichael Braun dev_kfree_skb(skb); 2512d8861babSMichael Braun skb = NULL; 2513d8861babSMichael Braun rx_queue->stats.rx_dropped++; 2514d8861babSMichael Braun 2515d8861babSMichael Braun /* can continue normally */ 2516d8861babSMichael Braun } 2517d8861babSMichael Braun 251876f31e8bSClaudiu Manoil /* order rx buffer descriptor reads */ 2519ec21e2ecSJeff Kirsher rmb(); 2520ec21e2ecSJeff Kirsher 252176f31e8bSClaudiu Manoil /* fetch next to clean buffer from the ring */ 252275354148SClaudiu Manoil skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); 252375354148SClaudiu Manoil if (unlikely(!skb)) 252475354148SClaudiu Manoil break; 2525ec21e2ecSJeff Kirsher 252675354148SClaudiu Manoil cleaned_cnt++; 252775354148SClaudiu Manoil howmany++; 2528ec21e2ecSJeff Kirsher 252975354148SClaudiu Manoil if (unlikely(++i == rx_queue->rx_ring_size)) 253075354148SClaudiu Manoil i = 0; 2531ec21e2ecSJeff Kirsher 253275354148SClaudiu Manoil rx_queue->next_to_clean = i; 253375354148SClaudiu Manoil 253475354148SClaudiu Manoil /* fetch next buffer if not the last in frame */ 253575354148SClaudiu Manoil if (!(lstatus & BD_LFLAG(RXBD_LAST))) 253675354148SClaudiu Manoil continue; 253775354148SClaudiu Manoil 253875354148SClaudiu Manoil if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { 2539f23223f1SClaudiu Manoil count_errors(lstatus, ndev); 2540ec21e2ecSJeff Kirsher 254176f31e8bSClaudiu Manoil /* discard faulty buffer */ 2542acb600deSEric Dumazet dev_kfree_skb(skb); 254375354148SClaudiu Manoil skb = NULL; 254475354148SClaudiu Manoil rx_queue->stats.rx_dropped++; 254575354148SClaudiu Manoil continue; 254675354148SClaudiu Manoil } 254776f31e8bSClaudiu Manoil 2548590399ddSClaudiu Manoil gfar_process_frame(ndev, skb); 2549590399ddSClaudiu Manoil 2550ec21e2ecSJeff Kirsher /* Increment the number of packets */ 
255175354148SClaudiu Manoil total_pkts++;
255275354148SClaudiu Manoil total_bytes += skb->len;
2553ec21e2ecSJeff Kirsher
2554ec21e2ecSJeff Kirsher skb_record_rx_queue(skb, rx_queue->qindex);
255575354148SClaudiu Manoil
2556590399ddSClaudiu Manoil skb->protocol = eth_type_trans(skb, ndev);
2557f23223f1SClaudiu Manoil
2558f23223f1SClaudiu Manoil /* Send the packet up the stack */
2559f23223f1SClaudiu Manoil napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2560ec21e2ecSJeff Kirsher
256175354148SClaudiu Manoil skb = NULL;
2562ec21e2ecSJeff Kirsher }
2563ec21e2ecSJeff Kirsher
256475354148SClaudiu Manoil /* Store incomplete frames for completion */
256575354148SClaudiu Manoil rx_queue->skb = skb;
2566ec21e2ecSJeff Kirsher
256775354148SClaudiu Manoil rx_queue->stats.rx_packets += total_pkts;
256875354148SClaudiu Manoil rx_queue->stats.rx_bytes += total_bytes;
256976f31e8bSClaudiu Manoil
257076f31e8bSClaudiu Manoil if (cleaned_cnt)
257176f31e8bSClaudiu Manoil gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
257276f31e8bSClaudiu Manoil
257376f31e8bSClaudiu Manoil /* Update Last Free RxBD pointer for LFC */
257476f31e8bSClaudiu Manoil if (unlikely(priv->tx_actual_en)) {
2575b4b67f26SScott Wood u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2576b4b67f26SScott Wood
2577b4b67f26SScott Wood gfar_write(rx_queue->rfbptr, bdp_dma);
257876f31e8bSClaudiu Manoil }
2579ec21e2ecSJeff Kirsher
2580ec21e2ecSJeff Kirsher return howmany;
2581ec21e2ecSJeff Kirsher }
2582ec21e2ecSJeff Kirsher
2583aeb12c5eSClaudiu Manoil static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
25845eaedf31SClaudiu Manoil {
25855eaedf31SClaudiu Manoil struct gfar_priv_grp *gfargrp =
2586aeb12c5eSClaudiu Manoil container_of(napi, struct gfar_priv_grp, napi_rx);
25875eaedf31SClaudiu Manoil struct gfar __iomem *regs = gfargrp->regs;
258871ff9e3dSClaudiu Manoil struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
25895eaedf31SClaudiu Manoil int work_done = 0;
25905eaedf31SClaudiu Manoil
25915eaedf31SClaudiu Manoil /* Clear IEVENT, so interrupts aren't called again
25925eaedf31SClaudiu Manoil * because of the packets that have already arrived
25935eaedf31SClaudiu Manoil */
2594aeb12c5eSClaudiu Manoil gfar_write(&regs->ievent, IEVENT_RX_MASK);
25955eaedf31SClaudiu Manoil
25965eaedf31SClaudiu Manoil work_done = gfar_clean_rx_ring(rx_queue, budget);
25975eaedf31SClaudiu Manoil
25985eaedf31SClaudiu Manoil if (work_done < budget) {
2599aeb12c5eSClaudiu Manoil u32 imask;
26006ad20165SEric Dumazet napi_complete_done(napi, work_done);
26015eaedf31SClaudiu Manoil /* Clear the halt bit in RSTAT */
26025eaedf31SClaudiu Manoil gfar_write(&regs->rstat, gfargrp->rstat);
26035eaedf31SClaudiu Manoil
2604aeb12c5eSClaudiu Manoil spin_lock_irq(&gfargrp->grplock);
2605aeb12c5eSClaudiu Manoil imask = gfar_read(&regs->imask);
2606aeb12c5eSClaudiu Manoil imask |= IMASK_RX_DEFAULT;
2607aeb12c5eSClaudiu Manoil gfar_write(&regs->imask, imask);
2608aeb12c5eSClaudiu Manoil spin_unlock_irq(&gfargrp->grplock);
26095eaedf31SClaudiu Manoil }
26105eaedf31SClaudiu Manoil
26115eaedf31SClaudiu Manoil return work_done;
26125eaedf31SClaudiu Manoil }
26135eaedf31SClaudiu Manoil
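/* [Editor's note] gfar_poll_rx_sq() above is a textbook NAPI poll: the hard
 * irq (gfar_receive) masked the rx sources and scheduled it, so it may only
 * re-enable those sources once it finishes under budget. A minimal sketch of
 * that contract follows; my_poll(), my_clean_ring() and my_unmask_rx() are
 * hypothetical stand-ins, not functions of this driver:
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	/* process at most 'budget' received packets */
	int work_done = my_clean_ring(napi, budget);

	if (work_done < budget) {
		/* ring drained: leave polled mode ... */
		napi_complete_done(napi, work_done);
		/* ... and re-arm the rx interrupt source */
		my_unmask_rx(napi);
	}

	/* returning 'budget' keeps the poll scheduled */
	return work_done;
}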
2614aeb12c5eSClaudiu Manoil static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2615ec21e2ecSJeff Kirsher {
2616bc4598bcSJan Ceuleers struct gfar_priv_grp *gfargrp =
2617aeb12c5eSClaudiu Manoil container_of(napi, struct gfar_priv_grp, napi_tx);
2618aeb12c5eSClaudiu Manoil struct gfar __iomem *regs = gfargrp->regs;
261971ff9e3dSClaudiu Manoil struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2620aeb12c5eSClaudiu Manoil u32 imask;
2621aeb12c5eSClaudiu Manoil
2622aeb12c5eSClaudiu Manoil /* Clear IEVENT, so interrupts aren't called again
2623aeb12c5eSClaudiu Manoil * because of the packets that have already arrived
2624aeb12c5eSClaudiu Manoil */
2625aeb12c5eSClaudiu Manoil gfar_write(&regs->ievent, IEVENT_TX_MASK);
2626aeb12c5eSClaudiu Manoil
2627aeb12c5eSClaudiu Manoil /* run Tx cleanup to completion */
2628aeb12c5eSClaudiu Manoil if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2629aeb12c5eSClaudiu Manoil gfar_clean_tx_ring(tx_queue);
2630aeb12c5eSClaudiu Manoil
2631aeb12c5eSClaudiu Manoil napi_complete(napi);
2632aeb12c5eSClaudiu Manoil
2633aeb12c5eSClaudiu Manoil spin_lock_irq(&gfargrp->grplock);
2634aeb12c5eSClaudiu Manoil imask = gfar_read(&regs->imask);
2635aeb12c5eSClaudiu Manoil imask |= IMASK_TX_DEFAULT;
2636aeb12c5eSClaudiu Manoil gfar_write(&regs->imask, imask);
2637aeb12c5eSClaudiu Manoil spin_unlock_irq(&gfargrp->grplock);
2638aeb12c5eSClaudiu Manoil
2639aeb12c5eSClaudiu Manoil return 0;
2640aeb12c5eSClaudiu Manoil }
2641aeb12c5eSClaudiu Manoil
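/* [Editor's note] gfar_poll_tx_sq() above intentionally ignores 'budget' and
 * returns 0: by NAPI convention, tx-completion work does not count against
 * the rx budget, so the tx poll runs its cleanup to completion, calls
 * napi_complete(), re-enables the tx interrupts and exits polled mode right
 * away.
 */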
26427d993c5fSArseny Solokha /* GFAR error interrupt handler */
26437d993c5fSArseny Solokha static irqreturn_t gfar_error(int irq, void *grp_id)
26447d993c5fSArseny Solokha {
26457d993c5fSArseny Solokha struct gfar_priv_grp *gfargrp = grp_id;
26467d993c5fSArseny Solokha struct gfar __iomem *regs = gfargrp->regs;
26477d993c5fSArseny Solokha struct gfar_private *priv = gfargrp->priv;
26487d993c5fSArseny Solokha struct net_device *dev = priv->ndev;
26497d993c5fSArseny Solokha
26507d993c5fSArseny Solokha /* Save ievent for future reference */
26517d993c5fSArseny Solokha u32 events = gfar_read(&regs->ievent);
26527d993c5fSArseny Solokha
26537d993c5fSArseny Solokha /* Clear IEVENT */
26547d993c5fSArseny Solokha gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
26557d993c5fSArseny Solokha
26567d993c5fSArseny Solokha /* Magic Packet is not an error. */
26577d993c5fSArseny Solokha if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
26587d993c5fSArseny Solokha (events & IEVENT_MAG))
26597d993c5fSArseny Solokha events &= ~IEVENT_MAG;
26607d993c5fSArseny Solokha
26617d993c5fSArseny Solokha /* Log any remaining error events for debugging */
26627d993c5fSArseny Solokha if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
26637d993c5fSArseny Solokha netdev_dbg(dev,
26647d993c5fSArseny Solokha "error interrupt (ievent=0x%08x imask=0x%08x)\n",
26657d993c5fSArseny Solokha events, gfar_read(&regs->imask));
26667d993c5fSArseny Solokha
26677d993c5fSArseny Solokha /* Update the error counters */
26687d993c5fSArseny Solokha if (events & IEVENT_TXE) {
26697d993c5fSArseny Solokha dev->stats.tx_errors++;
26707d993c5fSArseny Solokha
26717d993c5fSArseny Solokha if (events & IEVENT_LC)
26727d993c5fSArseny Solokha dev->stats.tx_window_errors++;
26737d993c5fSArseny Solokha if (events & IEVENT_CRL)
26747d993c5fSArseny Solokha dev->stats.tx_aborted_errors++;
26757d993c5fSArseny Solokha if (events & IEVENT_XFUN) {
26767d993c5fSArseny Solokha netif_dbg(priv, tx_err, dev,
26777d993c5fSArseny Solokha "TX FIFO underrun, packet dropped\n");
26787d993c5fSArseny Solokha dev->stats.tx_dropped++;
26797d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.tx_underrun);
26807d993c5fSArseny Solokha
26817d993c5fSArseny Solokha schedule_work(&priv->reset_task);
26827d993c5fSArseny Solokha }
26837d993c5fSArseny Solokha netif_dbg(priv, tx_err, dev, "Transmit Error\n");
26847d993c5fSArseny Solokha }
26857d993c5fSArseny Solokha if (events & IEVENT_BSY) {
26867d993c5fSArseny Solokha dev->stats.rx_over_errors++;
26877d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.rx_bsy);
26887d993c5fSArseny Solokha
26897d993c5fSArseny Solokha netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
26907d993c5fSArseny Solokha gfar_read(&regs->rstat));
26917d993c5fSArseny Solokha }
26927d993c5fSArseny Solokha if (events & IEVENT_BABR) {
26937d993c5fSArseny Solokha dev->stats.rx_errors++;
26947d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.rx_babr);
26957d993c5fSArseny Solokha
26967d993c5fSArseny Solokha netif_dbg(priv, rx_err, dev, "babbling RX error\n");
26977d993c5fSArseny Solokha }
26987d993c5fSArseny Solokha if (events & IEVENT_EBERR) {
26997d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.eberr);
27007d993c5fSArseny Solokha netif_dbg(priv, rx_err, dev, "bus error\n");
27017d993c5fSArseny Solokha }
27027d993c5fSArseny Solokha if (events & IEVENT_RXC)
27037d993c5fSArseny Solokha netif_dbg(priv, rx_status, dev, "control frame\n");
27047d993c5fSArseny Solokha
27057d993c5fSArseny Solokha if (events & IEVENT_BABT) {
27067d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.tx_babt);
27077d993c5fSArseny Solokha netif_dbg(priv, tx_err, dev, "babbling TX error\n");
27087d993c5fSArseny Solokha }
27097d993c5fSArseny Solokha return IRQ_HANDLED;
27107d993c5fSArseny Solokha }
27117d993c5fSArseny Solokha
27127d993c5fSArseny Solokha /* The interrupt handler for devices with one interrupt */
27137d993c5fSArseny Solokha static irqreturn_t gfar_interrupt(int irq, void *grp_id)
27147d993c5fSArseny Solokha {
27157d993c5fSArseny Solokha struct gfar_priv_grp *gfargrp = grp_id;
27167d993c5fSArseny Solokha
27177d993c5fSArseny Solokha /* Save ievent for future reference */
27187d993c5fSArseny Solokha u32 events = gfar_read(&gfargrp->regs->ievent);
27197d993c5fSArseny Solokha
27207d993c5fSArseny Solokha /* Check for reception */
27217d993c5fSArseny Solokha if (events & IEVENT_RX_MASK)
27227d993c5fSArseny Solokha gfar_receive(irq, grp_id);
27237d993c5fSArseny Solokha
27247d993c5fSArseny Solokha /* Check for transmit completion */
27257d993c5fSArseny Solokha if (events & IEVENT_TX_MASK)
27267d993c5fSArseny Solokha gfar_transmit(irq, grp_id);
27277d993c5fSArseny Solokha
27287d993c5fSArseny Solokha /* Check for errors */ 27297d993c5fSArseny Solokha if (events & IEVENT_ERR_MASK) 27307d993c5fSArseny Solokha gfar_error(irq, grp_id); 27317d993c5fSArseny Solokha 27327d993c5fSArseny Solokha return IRQ_HANDLED; 27337d993c5fSArseny Solokha } 2734aeb12c5eSClaudiu Manoil 2735ec21e2ecSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 27360977f817SJan Ceuleers /* Polling 'interrupt' - used by things like netconsole to send skbs 2737ec21e2ecSJeff Kirsher * without having to re-enable interrupts. It's not called while 2738ec21e2ecSJeff Kirsher * the interrupt routine is executing. 2739ec21e2ecSJeff Kirsher */ 2740ec21e2ecSJeff Kirsher static void gfar_netpoll(struct net_device *dev) 2741ec21e2ecSJeff Kirsher { 2742ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev); 27433a2e16c8SJan Ceuleers int i; 2744ec21e2ecSJeff Kirsher 2745ec21e2ecSJeff Kirsher /* If the device has multiple interrupts, run tx/rx */ 2746ec21e2ecSJeff Kirsher if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2747ec21e2ecSJeff Kirsher for (i = 0; i < priv->num_grps; i++) { 274862ed839dSPaul Gortmaker struct gfar_priv_grp *grp = &priv->gfargrp[i]; 274962ed839dSPaul Gortmaker 275062ed839dSPaul Gortmaker disable_irq(gfar_irq(grp, TX)->irq); 275162ed839dSPaul Gortmaker disable_irq(gfar_irq(grp, RX)->irq); 275262ed839dSPaul Gortmaker disable_irq(gfar_irq(grp, ER)->irq); 275362ed839dSPaul Gortmaker gfar_interrupt(gfar_irq(grp, TX)->irq, grp); 275462ed839dSPaul Gortmaker enable_irq(gfar_irq(grp, ER)->irq); 275562ed839dSPaul Gortmaker enable_irq(gfar_irq(grp, RX)->irq); 275662ed839dSPaul Gortmaker enable_irq(gfar_irq(grp, TX)->irq); 2757ec21e2ecSJeff Kirsher } 2758ec21e2ecSJeff Kirsher } else { 2759ec21e2ecSJeff Kirsher for (i = 0; i < priv->num_grps; i++) { 276062ed839dSPaul Gortmaker struct gfar_priv_grp *grp = &priv->gfargrp[i]; 276162ed839dSPaul Gortmaker 276262ed839dSPaul Gortmaker disable_irq(gfar_irq(grp, TX)->irq); 276362ed839dSPaul Gortmaker gfar_interrupt(gfar_irq(grp, TX)->irq, grp); 276462ed839dSPaul Gortmaker enable_irq(gfar_irq(grp, TX)->irq); 2765ec21e2ecSJeff Kirsher } 2766ec21e2ecSJeff Kirsher } 2767ec21e2ecSJeff Kirsher } 2768ec21e2ecSJeff Kirsher #endif 2769ec21e2ecSJeff Kirsher 27707d993c5fSArseny Solokha static void free_grp_irqs(struct gfar_priv_grp *grp) 2771ec21e2ecSJeff Kirsher { 27727d993c5fSArseny Solokha free_irq(gfar_irq(grp, TX)->irq, grp); 27737d993c5fSArseny Solokha free_irq(gfar_irq(grp, RX)->irq, grp); 27747d993c5fSArseny Solokha free_irq(gfar_irq(grp, ER)->irq, grp); 2775ec21e2ecSJeff Kirsher } 2776ec21e2ecSJeff Kirsher 27777d993c5fSArseny Solokha static int register_grp_irqs(struct gfar_priv_grp *grp) 27787d993c5fSArseny Solokha { 27797d993c5fSArseny Solokha struct gfar_private *priv = grp->priv; 27807d993c5fSArseny Solokha struct net_device *dev = priv->ndev; 27817d993c5fSArseny Solokha int err; 27827d993c5fSArseny Solokha 27837d993c5fSArseny Solokha /* If the device has multiple interrupts, register for 27847d993c5fSArseny Solokha * them. 
Otherwise, only register for the one 2785ec21e2ecSJeff Kirsher */ 27867d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 27877d993c5fSArseny Solokha /* Install our interrupt handlers for Error, 27887d993c5fSArseny Solokha * Transmit, and Receive 27897d993c5fSArseny Solokha */ 27907d993c5fSArseny Solokha err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 27917d993c5fSArseny Solokha gfar_irq(grp, ER)->name, grp); 27927d993c5fSArseny Solokha if (err < 0) { 27937d993c5fSArseny Solokha netif_err(priv, intr, dev, "Can't get IRQ %d\n", 27947d993c5fSArseny Solokha gfar_irq(grp, ER)->irq); 27957d993c5fSArseny Solokha 27967d993c5fSArseny Solokha goto err_irq_fail; 27977d993c5fSArseny Solokha } 27987d993c5fSArseny Solokha enable_irq_wake(gfar_irq(grp, ER)->irq); 27997d993c5fSArseny Solokha 28007d993c5fSArseny Solokha err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, 28017d993c5fSArseny Solokha gfar_irq(grp, TX)->name, grp); 28027d993c5fSArseny Solokha if (err < 0) { 28037d993c5fSArseny Solokha netif_err(priv, intr, dev, "Can't get IRQ %d\n", 28047d993c5fSArseny Solokha gfar_irq(grp, TX)->irq); 28057d993c5fSArseny Solokha goto tx_irq_fail; 28067d993c5fSArseny Solokha } 28077d993c5fSArseny Solokha err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, 28087d993c5fSArseny Solokha gfar_irq(grp, RX)->name, grp); 28097d993c5fSArseny Solokha if (err < 0) { 28107d993c5fSArseny Solokha netif_err(priv, intr, dev, "Can't get IRQ %d\n", 28117d993c5fSArseny Solokha gfar_irq(grp, RX)->irq); 28127d993c5fSArseny Solokha goto rx_irq_fail; 28137d993c5fSArseny Solokha } 28147d993c5fSArseny Solokha enable_irq_wake(gfar_irq(grp, RX)->irq); 28157d993c5fSArseny Solokha 28167d993c5fSArseny Solokha } else { 28177d993c5fSArseny Solokha err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 28187d993c5fSArseny Solokha gfar_irq(grp, TX)->name, grp); 28197d993c5fSArseny Solokha if (err < 0) { 28207d993c5fSArseny Solokha netif_err(priv, intr, dev, "Can't get IRQ %d\n", 28217d993c5fSArseny Solokha gfar_irq(grp, TX)->irq); 28227d993c5fSArseny Solokha goto err_irq_fail; 28237d993c5fSArseny Solokha } 28247d993c5fSArseny Solokha enable_irq_wake(gfar_irq(grp, TX)->irq); 28257d993c5fSArseny Solokha } 28267d993c5fSArseny Solokha 28277d993c5fSArseny Solokha return 0; 28287d993c5fSArseny Solokha 28297d993c5fSArseny Solokha rx_irq_fail: 28307d993c5fSArseny Solokha free_irq(gfar_irq(grp, TX)->irq, grp); 28317d993c5fSArseny Solokha tx_irq_fail: 28327d993c5fSArseny Solokha free_irq(gfar_irq(grp, ER)->irq, grp); 28337d993c5fSArseny Solokha err_irq_fail: 28347d993c5fSArseny Solokha return err; 28357d993c5fSArseny Solokha 28367d993c5fSArseny Solokha } 28377d993c5fSArseny Solokha 28387d993c5fSArseny Solokha static void gfar_free_irq(struct gfar_private *priv) 28397d993c5fSArseny Solokha { 28407d993c5fSArseny Solokha int i; 28417d993c5fSArseny Solokha 28427d993c5fSArseny Solokha /* Free the IRQs */ 28437d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 28447d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) 28457d993c5fSArseny Solokha free_grp_irqs(&priv->gfargrp[i]); 28467d993c5fSArseny Solokha } else { 28477d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) 28487d993c5fSArseny Solokha free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, 28497d993c5fSArseny Solokha &priv->gfargrp[i]); 28507d993c5fSArseny Solokha } 28517d993c5fSArseny Solokha } 28527d993c5fSArseny Solokha 28537d993c5fSArseny Solokha static int gfar_request_irq(struct gfar_private 
*priv) 28547d993c5fSArseny Solokha { 28557d993c5fSArseny Solokha int err, i, j; 28567d993c5fSArseny Solokha 28577d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) { 28587d993c5fSArseny Solokha err = register_grp_irqs(&priv->gfargrp[i]); 28597d993c5fSArseny Solokha if (err) { 28607d993c5fSArseny Solokha for (j = 0; j < i; j++) 28617d993c5fSArseny Solokha free_grp_irqs(&priv->gfargrp[j]); 28627d993c5fSArseny Solokha return err; 28637d993c5fSArseny Solokha } 28647d993c5fSArseny Solokha } 28657d993c5fSArseny Solokha 28667d993c5fSArseny Solokha return 0; 28677d993c5fSArseny Solokha } 28687d993c5fSArseny Solokha 28697d993c5fSArseny Solokha /* Called when something needs to use the ethernet device 28707d993c5fSArseny Solokha * Returns 0 for success. 28717d993c5fSArseny Solokha */ 28727d993c5fSArseny Solokha static int gfar_enet_open(struct net_device *dev) 2873ec21e2ecSJeff Kirsher { 2874ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev); 28757d993c5fSArseny Solokha int err; 2876ec21e2ecSJeff Kirsher 28777d993c5fSArseny Solokha err = init_phy(dev); 28787d993c5fSArseny Solokha if (err) 28797d993c5fSArseny Solokha return err; 28807d993c5fSArseny Solokha 28817d993c5fSArseny Solokha err = gfar_request_irq(priv); 28827d993c5fSArseny Solokha if (err) 28837d993c5fSArseny Solokha return err; 28847d993c5fSArseny Solokha 28857d993c5fSArseny Solokha err = startup_gfar(dev); 28867d993c5fSArseny Solokha if (err) 28877d993c5fSArseny Solokha return err; 28887d993c5fSArseny Solokha 28897d993c5fSArseny Solokha return err; 28907d993c5fSArseny Solokha } 28917d993c5fSArseny Solokha 28927d993c5fSArseny Solokha /* Stops the kernel queue, and halts the controller */ 28937d993c5fSArseny Solokha static int gfar_close(struct net_device *dev) 28947d993c5fSArseny Solokha { 28957d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(dev); 28967d993c5fSArseny Solokha 28977d993c5fSArseny Solokha cancel_work_sync(&priv->reset_task); 28987d993c5fSArseny Solokha stop_gfar(dev); 28997d993c5fSArseny Solokha 29007d993c5fSArseny Solokha /* Disconnect from the PHY */ 29017d993c5fSArseny Solokha phy_disconnect(dev->phydev); 29027d993c5fSArseny Solokha 29037d993c5fSArseny Solokha gfar_free_irq(priv); 29047d993c5fSArseny Solokha 29057d993c5fSArseny Solokha return 0; 29067d993c5fSArseny Solokha } 29077d993c5fSArseny Solokha 29087d993c5fSArseny Solokha /* Clears each of the exact match registers to zero, so they 29097d993c5fSArseny Solokha * don't interfere with normal reception 29107d993c5fSArseny Solokha */ 29117d993c5fSArseny Solokha static void gfar_clear_exact_match(struct net_device *dev) 29127d993c5fSArseny Solokha { 29137d993c5fSArseny Solokha int idx; 29147d993c5fSArseny Solokha static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 29157d993c5fSArseny Solokha 29167d993c5fSArseny Solokha for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) 29177d993c5fSArseny Solokha gfar_set_mac_for_addr(dev, idx, zero_arr); 2918ec21e2ecSJeff Kirsher } 2919ec21e2ecSJeff Kirsher 2920ec21e2ecSJeff Kirsher /* Update the hash table based on the current list of multicast 2921ec21e2ecSJeff Kirsher * addresses we subscribe to. 
Also, change the promiscuity of
2922ec21e2ecSJeff Kirsher * the device based on the flags (this function is called
29230977f817SJan Ceuleers * whenever dev->flags is changed)
29240977f817SJan Ceuleers */
2925ec21e2ecSJeff Kirsher static void gfar_set_multi(struct net_device *dev)
2926ec21e2ecSJeff Kirsher {
2927ec21e2ecSJeff Kirsher struct netdev_hw_addr *ha;
2928ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
2929ec21e2ecSJeff Kirsher struct gfar __iomem *regs = priv->gfargrp[0].regs;
2930ec21e2ecSJeff Kirsher u32 tempval;
2931ec21e2ecSJeff Kirsher
2932ec21e2ecSJeff Kirsher if (dev->flags & IFF_PROMISC) {
2933ec21e2ecSJeff Kirsher /* Set RCTRL to PROM */
2934ec21e2ecSJeff Kirsher tempval = gfar_read(&regs->rctrl);
2935ec21e2ecSJeff Kirsher tempval |= RCTRL_PROM;
2936ec21e2ecSJeff Kirsher gfar_write(&regs->rctrl, tempval);
2937ec21e2ecSJeff Kirsher } else {
2938ec21e2ecSJeff Kirsher /* Set RCTRL to not PROM */
2939ec21e2ecSJeff Kirsher tempval = gfar_read(&regs->rctrl);
2940ec21e2ecSJeff Kirsher tempval &= ~(RCTRL_PROM);
2941ec21e2ecSJeff Kirsher gfar_write(&regs->rctrl, tempval);
2942ec21e2ecSJeff Kirsher }
2943ec21e2ecSJeff Kirsher
2944ec21e2ecSJeff Kirsher if (dev->flags & IFF_ALLMULTI) {
2945ec21e2ecSJeff Kirsher /* Set the hash to rx all multicast frames */
2946ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr0, 0xffffffff);
2947ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr1, 0xffffffff);
2948ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr2, 0xffffffff);
2949ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr3, 0xffffffff);
2950ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr4, 0xffffffff);
2951ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr5, 0xffffffff);
2952ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr6, 0xffffffff);
2953ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr7, 0xffffffff);
2954ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr0, 0xffffffff);
2955ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr1, 0xffffffff);
2956ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr2, 0xffffffff);
2957ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr3, 0xffffffff);
2958ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr4, 0xffffffff);
2959ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr5, 0xffffffff);
2960ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr6, 0xffffffff);
2961ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr7, 0xffffffff);
2962ec21e2ecSJeff Kirsher } else {
2963ec21e2ecSJeff Kirsher int em_num;
2964ec21e2ecSJeff Kirsher int idx;
2965ec21e2ecSJeff Kirsher
2966ec21e2ecSJeff Kirsher /* zero out the hash */
2967ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr0, 0x0);
2968ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr1, 0x0);
2969ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr2, 0x0);
2970ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr3, 0x0);
2971ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr4, 0x0);
2972ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr5, 0x0);
2973ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr6, 0x0);
2974ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr7, 0x0);
2975ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr0, 0x0);
2976ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr1, 0x0);
2977ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr2, 0x0);
2978ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr3, 0x0);
2979ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr4, 0x0);
2980ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr5, 0x0);
2981ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr6, 0x0);
2982ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr7, 0x0);
2983ec21e2ecSJeff Kirsher
2984ec21e2ecSJeff Kirsher /* If we have extended hash tables, we need to
2985ec21e2ecSJeff Kirsher * clear the exact match registers to prepare for
29860977f817SJan Ceuleers * setting them
29870977f817SJan Ceuleers */
2988ec21e2ecSJeff Kirsher if (priv->extended_hash) {
2989ec21e2ecSJeff Kirsher em_num = GFAR_EM_NUM + 1;
2990ec21e2ecSJeff Kirsher gfar_clear_exact_match(dev);
2991ec21e2ecSJeff Kirsher idx = 1;
2992ec21e2ecSJeff Kirsher } else {
2993ec21e2ecSJeff Kirsher idx = 0;
2994ec21e2ecSJeff Kirsher em_num = 0;
2995ec21e2ecSJeff Kirsher }
2996ec21e2ecSJeff Kirsher
2997ec21e2ecSJeff Kirsher if (netdev_mc_empty(dev))
2998ec21e2ecSJeff Kirsher return;
2999ec21e2ecSJeff Kirsher
3000ec21e2ecSJeff Kirsher /* Parse the list, and set the appropriate bits */
3001ec21e2ecSJeff Kirsher netdev_for_each_mc_addr(ha, dev) {
3002ec21e2ecSJeff Kirsher if (idx < em_num) {
3003ec21e2ecSJeff Kirsher gfar_set_mac_for_addr(dev, idx, ha->addr);
3004ec21e2ecSJeff Kirsher idx++;
3005ec21e2ecSJeff Kirsher } else
3006ec21e2ecSJeff Kirsher gfar_set_hash_for_addr(dev, ha->addr);
3007ec21e2ecSJeff Kirsher }
3008ec21e2ecSJeff Kirsher }
3009ec21e2ecSJeff Kirsher }
3010ec21e2ecSJeff Kirsher
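/* [Editor's sketch] once the exact-match slots run out, the loop above falls
 * back to gfar_set_hash_for_addr() (defined elsewhere in this file), which
 * CRC-32s the MAC address and sets one bit out of the 256 spread across the
 * eight gaddr/igaddr registers written above. The user-space model below
 * shows the idea; the exact CRC bit-slice used for the register/bit choice
 * depends on priv->hash_width, so the shifts here are illustrative
 * assumptions, not the driver's literal arithmetic.
 */
#include <stdint.h>

/* bitwise big-endian CRC-32 over a 6-byte MAC, poly 0x04C11DB7, init ~0 */
static uint32_t eth_crc32(const uint8_t mac[6])
{
	uint32_t crc = 0xFFFFFFFFu;
	int i, b;

	for (i = 0; i < 6; i++) {
		crc ^= (uint32_t)mac[i] << 24;
		for (b = 0; b < 8; b++)
			crc = (crc << 1) ^ ((crc & 0x80000000u) ? 0x04C11DB7u : 0);
	}
	return crc;
}

/* map a MAC address onto one of 8 hash registers and one of its 32 bits */
static void hash_slot(const uint8_t mac[6], int *reg, int *bit)
{
	uint32_t crc = eth_crc32(mac);

	*reg = crc >> 29;          /* top 3 bits select the register */
	*bit = (crc >> 24) & 0x1f; /* next 5 bits select the bit     */
}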
30117d993c5fSArseny Solokha void gfar_mac_reset(struct gfar_private *priv)
30126ce29b0eSClaudiu Manoil {
30136ce29b0eSClaudiu Manoil struct gfar __iomem *regs = priv->gfargrp[0].regs;
30147d993c5fSArseny Solokha u32 tempval;
30156ce29b0eSClaudiu Manoil
30167d993c5fSArseny Solokha /* Reset MAC layer */
30177d993c5fSArseny Solokha gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
30186ce29b0eSClaudiu Manoil
30197d993c5fSArseny Solokha /* We need to delay at least 3 TX clocks */
30207d993c5fSArseny Solokha udelay(3);
30216ce29b0eSClaudiu Manoil
30227d993c5fSArseny Solokha /* the soft reset bit is not self-resetting, so we need to
30237d993c5fSArseny Solokha * clear it before resuming normal operation
30246ce29b0eSClaudiu Manoil */
30257d993c5fSArseny Solokha gfar_write(&regs->maccfg1, 0);
30266ce29b0eSClaudiu Manoil
30277d993c5fSArseny Solokha udelay(3);
30286ce29b0eSClaudiu Manoil
30297d993c5fSArseny Solokha gfar_rx_offload_en(priv);
30306ce29b0eSClaudiu Manoil
30317d993c5fSArseny Solokha /* Initialize the max receive frame/buffer lengths */
30327d993c5fSArseny Solokha gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
30337d993c5fSArseny Solokha gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3034b4b67f26SScott Wood
30357d993c5fSArseny Solokha /* Initialize the Minimum Frame Length Register */
30367d993c5fSArseny Solokha gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
303745b679c9SMatei Pavaluca
30387d993c5fSArseny Solokha /* Initialize MACCFG2. */
30397d993c5fSArseny Solokha tempval = MACCFG2_INIT_SETTINGS;
304045b679c9SMatei Pavaluca
30417d993c5fSArseny Solokha /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
30427d993c5fSArseny Solokha * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
30437d993c5fSArseny Solokha * and by checking RxBD[LG] and discarding larger than MAXFRM.
30447d993c5fSArseny Solokha */
30457d993c5fSArseny Solokha if (gfar_has_errata(priv, GFAR_ERRATA_74))
30467d993c5fSArseny Solokha tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
304745b679c9SMatei Pavaluca
30486ce29b0eSClaudiu Manoil gfar_write(&regs->maccfg2, tempval);
30496ce29b0eSClaudiu Manoil
30507d993c5fSArseny Solokha /* Clear mac addr hash registers */
30517d993c5fSArseny Solokha gfar_write(&regs->igaddr0, 0);
30527d993c5fSArseny Solokha gfar_write(&regs->igaddr1, 0);
30537d993c5fSArseny Solokha gfar_write(&regs->igaddr2, 0);
30547d993c5fSArseny Solokha gfar_write(&regs->igaddr3, 0);
30557d993c5fSArseny Solokha gfar_write(&regs->igaddr4, 0);
30567d993c5fSArseny Solokha gfar_write(&regs->igaddr5, 0);
30577d993c5fSArseny Solokha gfar_write(&regs->igaddr6, 0);
30587d993c5fSArseny Solokha gfar_write(&regs->igaddr7, 0);
30596ce29b0eSClaudiu Manoil
30607d993c5fSArseny Solokha gfar_write(&regs->gaddr0, 0);
30617d993c5fSArseny Solokha gfar_write(&regs->gaddr1, 0);
30627d993c5fSArseny Solokha gfar_write(&regs->gaddr2, 0);
30637d993c5fSArseny Solokha gfar_write(&regs->gaddr3, 0);
30647d993c5fSArseny Solokha gfar_write(&regs->gaddr4, 0);
30657d993c5fSArseny Solokha gfar_write(&regs->gaddr5, 0);
30667d993c5fSArseny Solokha gfar_write(&regs->gaddr6, 0);
30677d993c5fSArseny Solokha gfar_write(&regs->gaddr7, 0);
30687d993c5fSArseny Solokha
30697d993c5fSArseny Solokha if (priv->extended_hash)
30707d993c5fSArseny Solokha gfar_clear_exact_match(priv->ndev);
30717d993c5fSArseny Solokha
30727d993c5fSArseny Solokha gfar_mac_rx_config(priv);
30737d993c5fSArseny Solokha
30747d993c5fSArseny Solokha gfar_mac_tx_config(priv);
30757d993c5fSArseny Solokha
30767d993c5fSArseny Solokha gfar_set_mac_address(priv->ndev);
30777d993c5fSArseny Solokha
30787d993c5fSArseny Solokha gfar_set_multi(priv->ndev);
30797d993c5fSArseny Solokha
30807d993c5fSArseny Solokha /* clear ievent and imask before configuring coalescing */
30817d993c5fSArseny Solokha gfar_ints_disable(priv);
30827d993c5fSArseny Solokha
30837d993c5fSArseny Solokha /* Configure the coalescing support */
30847d993c5fSArseny Solokha gfar_configure_coalescing_all(priv);
30857d993c5fSArseny Solokha }
30867d993c5fSArseny Solokha
30877d993c5fSArseny Solokha static void gfar_hw_init(struct gfar_private *priv)
30887d993c5fSArseny Solokha {
30897d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
30907d993c5fSArseny Solokha u32 attrs;
30917d993c5fSArseny Solokha
30927d993c5fSArseny Solokha /* Stop the DMA engine now, in case it was running before
30937d993c5fSArseny Solokha * (The firmware could have used it, and left it running).
30947d993c5fSArseny Solokha */
30957d993c5fSArseny Solokha gfar_halt(priv);
30967d993c5fSArseny Solokha
30977d993c5fSArseny Solokha gfar_mac_reset(priv);
30987d993c5fSArseny Solokha
30997d993c5fSArseny Solokha /* Zero out the rmon mib registers if it has them */
31007d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
31017d993c5fSArseny Solokha memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
31027d993c5fSArseny Solokha
31037d993c5fSArseny Solokha /* Mask off the CAM interrupts */
31047d993c5fSArseny Solokha gfar_write(&regs->rmon.cam1, 0xffffffff);
31057d993c5fSArseny Solokha gfar_write(&regs->rmon.cam2, 0xffffffff);
3106*ef094874SEsben Haabendal /* Clear the CAR registers (w1c style) */
3107*ef094874SEsben Haabendal gfar_write(&regs->rmon.car1, 0xffffffff);
3108*ef094874SEsben Haabendal gfar_write(&regs->rmon.car2, 0xffffffff);
31097d993c5fSArseny Solokha }
31107d993c5fSArseny Solokha
31117d993c5fSArseny Solokha /* Initialize ECNTRL */
31127d993c5fSArseny Solokha gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
31137d993c5fSArseny Solokha
31147d993c5fSArseny Solokha /* Set the extraction length and index */
31157d993c5fSArseny Solokha attrs = ATTRELI_EL(priv->rx_stash_size) |
31167d993c5fSArseny Solokha ATTRELI_EI(priv->rx_stash_index);
31177d993c5fSArseny Solokha
31187d993c5fSArseny Solokha gfar_write(&regs->attreli, attrs);
31197d993c5fSArseny Solokha
31207d993c5fSArseny Solokha /* Start with defaults, and add stashing
31217d993c5fSArseny Solokha * depending on driver parameters
31227d993c5fSArseny Solokha */
31237d993c5fSArseny Solokha attrs = ATTR_INIT_SETTINGS;
31247d993c5fSArseny Solokha
31257d993c5fSArseny Solokha if (priv->bd_stash_en)
31267d993c5fSArseny Solokha attrs |= ATTR_BDSTASH;
31277d993c5fSArseny Solokha
31287d993c5fSArseny Solokha if (priv->rx_stash_size != 0)
31297d993c5fSArseny Solokha attrs |= ATTR_BUFSTASH;
31307d993c5fSArseny Solokha
31317d993c5fSArseny Solokha gfar_write(&regs->attr, attrs);
31327d993c5fSArseny Solokha
31337d993c5fSArseny Solokha /* FIFO configs */
31347d993c5fSArseny Solokha gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
31357d993c5fSArseny Solokha gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
31367d993c5fSArseny Solokha gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
31377d993c5fSArseny Solokha
31387d993c5fSArseny Solokha /* Program the interrupt steering regs, only for MG devices */
31397d993c5fSArseny Solokha if (priv->num_grps > 1)
31407d993c5fSArseny Solokha gfar_write_isrg(priv);
31417d993c5fSArseny Solokha }
31427d993c5fSArseny Solokha
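/* [Editor's note] the ordering in gfar_hw_init() above matters: the DMA
 * engine is halted first (boot firmware may have left it running), the MAC
 * block is reset and reconfigured, and only then are the attribute
 * registers programmed. ATTR_BDSTASH/ATTR_BUFSTASH ask the eTSEC to stash
 * freshly DMA'd descriptors and the first rx_stash_size payload bytes into
 * the CPU cache, trading cache pressure for rx latency; both knobs come
 * from device-tree properties. (The cache-stashing rationale is an
 * editorial gloss on the register names, not a claim from the original
 * comments.)
 */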
31437d993c5fSArseny Solokha static const struct net_device_ops gfar_netdev_ops = {
31447d993c5fSArseny Solokha .ndo_open = gfar_enet_open,
31457d993c5fSArseny Solokha .ndo_start_xmit = gfar_start_xmit,
31467d993c5fSArseny Solokha .ndo_stop = gfar_close,
31477d993c5fSArseny Solokha .ndo_change_mtu = gfar_change_mtu,
31487d993c5fSArseny Solokha .ndo_set_features = gfar_set_features,
31497d993c5fSArseny Solokha .ndo_set_rx_mode = gfar_set_multi,
31507d993c5fSArseny Solokha .ndo_tx_timeout = gfar_timeout,
31517d993c5fSArseny Solokha .ndo_do_ioctl = gfar_ioctl,
3152d59a24fdSEsben Haabendal .ndo_get_stats64 = gfar_get_stats64,
31537d993c5fSArseny Solokha .ndo_change_carrier = fixed_phy_change_carrier,
31547d993c5fSArseny Solokha .ndo_set_mac_address = gfar_set_mac_addr,
31557d993c5fSArseny Solokha .ndo_validate_addr = eth_validate_addr,
31567d993c5fSArseny Solokha #ifdef CONFIG_NET_POLL_CONTROLLER
31577d993c5fSArseny Solokha .ndo_poll_controller = gfar_netpoll,
31587d993c5fSArseny Solokha #endif
31597d993c5fSArseny Solokha };
31607d993c5fSArseny Solokha
31617d993c5fSArseny Solokha /* Set up the ethernet device structure, private data,
31627d993c5fSArseny Solokha * and anything else we need before we start
31637d993c5fSArseny Solokha */
31647d993c5fSArseny Solokha static int gfar_probe(struct platform_device *ofdev)
31657d993c5fSArseny Solokha {
31667d993c5fSArseny Solokha struct device_node *np = ofdev->dev.of_node;
31677d993c5fSArseny Solokha struct net_device *dev = NULL;
31687d993c5fSArseny Solokha struct gfar_private *priv = NULL;
31697d993c5fSArseny Solokha int err = 0, i;
31707d993c5fSArseny Solokha
31717d993c5fSArseny Solokha err = gfar_of_init(ofdev, &dev);
31727d993c5fSArseny Solokha
31737d993c5fSArseny Solokha if (err)
31747d993c5fSArseny Solokha return err;
31757d993c5fSArseny Solokha
31767d993c5fSArseny Solokha priv = netdev_priv(dev);
31777d993c5fSArseny Solokha priv->ndev = dev;
31787d993c5fSArseny Solokha priv->ofdev = ofdev;
31797d993c5fSArseny Solokha priv->dev = &ofdev->dev;
31807d993c5fSArseny Solokha SET_NETDEV_DEV(dev, &ofdev->dev);
31817d993c5fSArseny Solokha
31827d993c5fSArseny Solokha INIT_WORK(&priv->reset_task, gfar_reset_task);
31837d993c5fSArseny Solokha
31847d993c5fSArseny Solokha platform_set_drvdata(ofdev, priv);
31857d993c5fSArseny Solokha
31867d993c5fSArseny Solokha gfar_detect_errata(priv);
31877d993c5fSArseny Solokha
31887d993c5fSArseny Solokha /* Set the dev->base_addr to the gfar reg region */
31897d993c5fSArseny Solokha dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
31907d993c5fSArseny Solokha
31917d993c5fSArseny Solokha /* Fill in the dev structure */
31927d993c5fSArseny Solokha dev->watchdog_timeo = TX_TIMEOUT;
31937d993c5fSArseny Solokha /* MTU range: 50 - 9586 */
31947d993c5fSArseny Solokha dev->mtu = 1500;
31957d993c5fSArseny Solokha dev->min_mtu = 50;
31967d993c5fSArseny Solokha dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
31977d993c5fSArseny Solokha dev->netdev_ops = &gfar_netdev_ops;
31987d993c5fSArseny Solokha dev->ethtool_ops = &gfar_ethtool_ops;
31997d993c5fSArseny Solokha
32007d993c5fSArseny Solokha /* Register NAPI: one rx and one tx context per interrupt group */
32017d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) {
32027d993c5fSArseny Solokha netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
32037d993c5fSArseny Solokha gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
32047d993c5fSArseny Solokha netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
32057d993c5fSArseny Solokha gfar_poll_tx_sq, 2);
32067d993c5fSArseny Solokha }
32077d993c5fSArseny Solokha
32087d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
32097d993c5fSArseny Solokha dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
32107d993c5fSArseny Solokha NETIF_F_RXCSUM;
32117d993c5fSArseny Solokha dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
32127d993c5fSArseny Solokha NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
32137d993c5fSArseny Solokha }
32147d993c5fSArseny Solokha
32157d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
32167d993c5fSArseny Solokha dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
32177d993c5fSArseny Solokha NETIF_F_HW_VLAN_CTAG_RX;
32187d993c5fSArseny Solokha dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
32197d993c5fSArseny Solokha }
32207d993c5fSArseny Solokha
32217d993c5fSArseny Solokha dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
32227d993c5fSArseny Solokha
32237d993c5fSArseny Solokha gfar_init_addr_hash_table(priv);
32247d993c5fSArseny Solokha
32257d993c5fSArseny Solokha /* Insert
receive time stamps into the padding alignment bytes,
32267d993c5fSArseny Solokha * plus 2 bytes of padding to ensure CPU alignment.
32277d993c5fSArseny Solokha */
32287d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
32297d993c5fSArseny Solokha priv->padding = 8 + DEFAULT_PADDING;
32307d993c5fSArseny Solokha
32317d993c5fSArseny Solokha if (dev->features & NETIF_F_IP_CSUM ||
32327d993c5fSArseny Solokha priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3233d6a076d6SClaudiu Manoil dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
32347d993c5fSArseny Solokha
32357d993c5fSArseny Solokha /* Initializing some of the rx/tx queue level parameters */
32367d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) {
32377d993c5fSArseny Solokha priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
32387d993c5fSArseny Solokha priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
32397d993c5fSArseny Solokha priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
32407d993c5fSArseny Solokha priv->tx_queue[i]->txic = DEFAULT_TXIC;
32417d993c5fSArseny Solokha }
32427d993c5fSArseny Solokha
32437d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) {
32447d993c5fSArseny Solokha priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
32457d993c5fSArseny Solokha priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
32467d993c5fSArseny Solokha priv->rx_queue[i]->rxic = DEFAULT_RXIC;
32477d993c5fSArseny Solokha }
32487d993c5fSArseny Solokha
32497d993c5fSArseny Solokha /* Always enable rx filer if available */
32507d993c5fSArseny Solokha priv->rx_filer_enable =
32517d993c5fSArseny Solokha (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
32527d993c5fSArseny Solokha /* Enable most messages by default */
32537d993c5fSArseny Solokha priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
32547d993c5fSArseny Solokha /* use priority h/w tx queue scheduling for single queue devices */
32557d993c5fSArseny Solokha if (priv->num_tx_queues == 1)
32567d993c5fSArseny Solokha priv->prio_sched_en = 1;
32577d993c5fSArseny Solokha
32587d993c5fSArseny Solokha set_bit(GFAR_DOWN, &priv->state);
32597d993c5fSArseny Solokha
32607d993c5fSArseny Solokha gfar_hw_init(priv);
32617d993c5fSArseny Solokha
32627d993c5fSArseny Solokha /* Carrier starts down, phylib will bring it up */
32637d993c5fSArseny Solokha netif_carrier_off(dev);
32647d993c5fSArseny Solokha
32657d993c5fSArseny Solokha err = register_netdev(dev);
32667d993c5fSArseny Solokha
32677d993c5fSArseny Solokha if (err) {
32687d993c5fSArseny Solokha pr_err("%s: Cannot register net device, aborting\n", dev->name);
32697d993c5fSArseny Solokha goto register_fail;
32707d993c5fSArseny Solokha }
32717d993c5fSArseny Solokha
32727d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
32737d993c5fSArseny Solokha priv->wol_supported |= GFAR_WOL_MAGIC;
32747d993c5fSArseny Solokha
32757d993c5fSArseny Solokha if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
32767d993c5fSArseny Solokha priv->rx_filer_enable)
32777d993c5fSArseny Solokha priv->wol_supported |= GFAR_WOL_FILER_UCAST;
32787d993c5fSArseny Solokha
32797d993c5fSArseny Solokha device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
32807d993c5fSArseny Solokha
32817d993c5fSArseny Solokha /* fill out IRQ number and name fields */
32827d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) {
32837d993c5fSArseny Solokha struct gfar_priv_grp *grp = &priv->gfargrp[i];
32847d993c5fSArseny Solokha if (priv->device_flags &
FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 32857d993c5fSArseny Solokha sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", 32867d993c5fSArseny Solokha dev->name, "_g", '0' + i, "_tx"); 32877d993c5fSArseny Solokha sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", 32887d993c5fSArseny Solokha dev->name, "_g", '0' + i, "_rx"); 32897d993c5fSArseny Solokha sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", 32907d993c5fSArseny Solokha dev->name, "_g", '0' + i, "_er"); 32917d993c5fSArseny Solokha } else 32927d993c5fSArseny Solokha strcpy(gfar_irq(grp, TX)->name, dev->name); 32937d993c5fSArseny Solokha } 32947d993c5fSArseny Solokha 32957d993c5fSArseny Solokha /* Initialize the filer table */ 32967d993c5fSArseny Solokha gfar_init_filer_table(priv); 32977d993c5fSArseny Solokha 32987d993c5fSArseny Solokha /* Print out the device info */ 32997d993c5fSArseny Solokha netdev_info(dev, "mac: %pM\n", dev->dev_addr); 33007d993c5fSArseny Solokha 33017d993c5fSArseny Solokha /* Even more device info helps when determining which kernel 33027d993c5fSArseny Solokha * provided which set of benchmarks. 33037d993c5fSArseny Solokha */ 33047d993c5fSArseny Solokha netdev_info(dev, "Running with NAPI enabled\n"); 33057d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) 33067d993c5fSArseny Solokha netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 33077d993c5fSArseny Solokha i, priv->rx_queue[i]->rx_ring_size); 33087d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) 33097d993c5fSArseny Solokha netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 33107d993c5fSArseny Solokha i, priv->tx_queue[i]->tx_ring_size); 33117d993c5fSArseny Solokha 33127d993c5fSArseny Solokha return 0; 33137d993c5fSArseny Solokha 33147d993c5fSArseny Solokha register_fail: 33157d993c5fSArseny Solokha if (of_phy_is_fixed_link(np)) 33167d993c5fSArseny Solokha of_phy_deregister_fixed_link(np); 33177d993c5fSArseny Solokha unmap_group_regs(priv); 33187d993c5fSArseny Solokha gfar_free_rx_queues(priv); 33197d993c5fSArseny Solokha gfar_free_tx_queues(priv); 33207d993c5fSArseny Solokha of_node_put(priv->phy_node); 33217d993c5fSArseny Solokha of_node_put(priv->tbi_node); 33227d993c5fSArseny Solokha free_gfar_dev(priv); 33237d993c5fSArseny Solokha return err; 33247d993c5fSArseny Solokha } 33257d993c5fSArseny Solokha 33267d993c5fSArseny Solokha static int gfar_remove(struct platform_device *ofdev) 33277d993c5fSArseny Solokha { 33287d993c5fSArseny Solokha struct gfar_private *priv = platform_get_drvdata(ofdev); 33297d993c5fSArseny Solokha struct device_node *np = ofdev->dev.of_node; 33307d993c5fSArseny Solokha 33317d993c5fSArseny Solokha of_node_put(priv->phy_node); 33327d993c5fSArseny Solokha of_node_put(priv->tbi_node); 33337d993c5fSArseny Solokha 33347d993c5fSArseny Solokha unregister_netdev(priv->ndev); 33357d993c5fSArseny Solokha 33367d993c5fSArseny Solokha if (of_phy_is_fixed_link(np)) 33377d993c5fSArseny Solokha of_phy_deregister_fixed_link(np); 33387d993c5fSArseny Solokha 33397d993c5fSArseny Solokha unmap_group_regs(priv); 33407d993c5fSArseny Solokha gfar_free_rx_queues(priv); 33417d993c5fSArseny Solokha gfar_free_tx_queues(priv); 33427d993c5fSArseny Solokha free_gfar_dev(priv); 33437d993c5fSArseny Solokha 33447d993c5fSArseny Solokha return 0; 33457d993c5fSArseny Solokha } 33467d993c5fSArseny Solokha 33477d993c5fSArseny Solokha #ifdef CONFIG_PM 33487d993c5fSArseny Solokha 33497d993c5fSArseny Solokha static void __gfar_filer_disable(struct gfar_private *priv) 33507d993c5fSArseny Solokha { 33517d993c5fSArseny Solokha struct gfar __iomem *regs = 
priv->gfargrp[0].regs;
33527d993c5fSArseny Solokha u32 temp;
33537d993c5fSArseny Solokha
33547d993c5fSArseny Solokha temp = gfar_read(&regs->rctrl);
33557d993c5fSArseny Solokha temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
33567d993c5fSArseny Solokha gfar_write(&regs->rctrl, temp);
33577d993c5fSArseny Solokha }
33587d993c5fSArseny Solokha
33597d993c5fSArseny Solokha static void __gfar_filer_enable(struct gfar_private *priv)
33607d993c5fSArseny Solokha {
33617d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
33627d993c5fSArseny Solokha u32 temp;
33637d993c5fSArseny Solokha
33647d993c5fSArseny Solokha temp = gfar_read(&regs->rctrl);
33657d993c5fSArseny Solokha temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
33667d993c5fSArseny Solokha gfar_write(&regs->rctrl, temp);
33677d993c5fSArseny Solokha }
33687d993c5fSArseny Solokha
33697d993c5fSArseny Solokha /* Filer rules implementing wol capabilities */
33707d993c5fSArseny Solokha static void gfar_filer_config_wol(struct gfar_private *priv)
33717d993c5fSArseny Solokha {
33727d993c5fSArseny Solokha unsigned int i;
33737d993c5fSArseny Solokha u32 rqfcr;
33747d993c5fSArseny Solokha
33757d993c5fSArseny Solokha __gfar_filer_disable(priv);
33767d993c5fSArseny Solokha
33777d993c5fSArseny Solokha /* clear the filer table, reject any packet by default */
33787d993c5fSArseny Solokha rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
33797d993c5fSArseny Solokha for (i = 0; i <= MAX_FILER_IDX; i++)
33807d993c5fSArseny Solokha gfar_write_filer(priv, i, rqfcr, 0);
33817d993c5fSArseny Solokha
33827d993c5fSArseny Solokha i = 0;
33837d993c5fSArseny Solokha if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
33847d993c5fSArseny Solokha /* unicast packet, accept it */
33857d993c5fSArseny Solokha struct net_device *ndev = priv->ndev;
33867d993c5fSArseny Solokha /* get the default rx queue index */
33877d993c5fSArseny Solokha u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
33887d993c5fSArseny Solokha u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
33897d993c5fSArseny Solokha (ndev->dev_addr[1] << 8) |
33907d993c5fSArseny Solokha ndev->dev_addr[2];
33917d993c5fSArseny Solokha
33927d993c5fSArseny Solokha rqfcr = (qindex << 10) | RQFCR_AND |
33937d993c5fSArseny Solokha RQFCR_CMP_EXACT | RQFCR_PID_DAH;
33947d993c5fSArseny Solokha
33957d993c5fSArseny Solokha gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
33967d993c5fSArseny Solokha
33977d993c5fSArseny Solokha dest_mac_addr = (ndev->dev_addr[3] << 16) |
33987d993c5fSArseny Solokha (ndev->dev_addr[4] << 8) |
33997d993c5fSArseny Solokha ndev->dev_addr[5];
34007d993c5fSArseny Solokha rqfcr = (qindex << 10) | RQFCR_GPI |
34017d993c5fSArseny Solokha RQFCR_CMP_EXACT | RQFCR_PID_DAL;
34027d993c5fSArseny Solokha gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
34037d993c5fSArseny Solokha }
34047d993c5fSArseny Solokha
34057d993c5fSArseny Solokha __gfar_filer_enable(priv);
34067d993c5fSArseny Solokha }
34077d993c5fSArseny Solokha
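/* [Editor's sketch] the wake-on-LAN rules written above match the station's
 * unicast MAC by splitting it across two chained filer rules: RQFCR_PID_DAH
 * compares the upper three address bytes, RQFCR_PID_DAL the lower three,
 * RQFCR_AND ties a rule to the one after it, and RQFCR_GPI on the final
 * rule raises the filer general-purpose interrupt (IEVENT_FGPI) used as the
 * wake event. The property words are packed exactly as in
 * gfar_filer_config_wol():
 */
#include <stdint.h>

static void mac_to_dah_dal(const uint8_t mac[6], uint32_t *dah, uint32_t *dal)
{
	/* DAH property: destination address bytes 0..2 */
	*dah = ((uint32_t)mac[0] << 16) | ((uint32_t)mac[1] << 8) | mac[2];
	/* DAL property: destination address bytes 3..5 */
	*dal = ((uint32_t)mac[3] << 16) | ((uint32_t)mac[4] << 8) | mac[5];
}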
/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
static void gfar_start_wol_filer(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~DMACTRL_GRS;
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear RHLT, so that the DMA starts polling now */
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* enable the Filer General Purpose Interrupt */
		gfar_write(&regs->imask, IMASK_FGPI);
	}

	/* Enable Rx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= MACCFG1_RX_EN;
	gfar_write(&regs->maccfg1, tempval);
}

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	disable_napi(priv);
	netif_tx_lock(ndev);
	netif_device_detach(ndev);
	netif_tx_unlock(ndev);

	gfar_halt(priv);

	if (wol & GFAR_WOL_MAGIC) {
		/* Enable interrupt on Magic Packet */
		gfar_write(&regs->imask, IMASK_MAG);

		/* Enable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval |= MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

		/* re-enable the Rx block */
		tempval = gfar_read(&regs->maccfg1);
		tempval |= MACCFG1_RX_EN;
		gfar_write(&regs->maccfg1, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		gfar_filer_config_wol(priv);
		gfar_start_wol_filer(priv);

	} else {
		phy_stop(ndev->phydev);
	}

	return 0;
}
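/* Summary of the three suspend paths above (descriptive comment, not
 * part of the original sources): with GFAR_WOL_MAGIC the MAC Rx block
 * is left running and MACCFG2_MPEN armed, so the controller itself
 * scans incoming frames for a magic packet and signals one via
 * IMASK_MAG.  With GFAR_WOL_FILER_UCAST the filer table is swapped for
 * the reject-everything-but-my-unicast rules and Rx DMA is restarted
 * in the FGPI-only configuration.  If no wake source is configured,
 * the PHY is simply stopped, presumably to save power while the link
 * is not needed.
 */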
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	if (wol & GFAR_WOL_MAGIC) {
		/* Disable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval &= ~MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		/* need to stop rx only, tx is already down */
		gfar_halt(priv);
		gfar_filer_restore_table(priv);

	} else {
		phy_start(ndev->phydev);
	}

	gfar_start(priv);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	gfar_init_bds(ndev);

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static const struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};
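/* The same callbacks serve both suspend-to-RAM and hibernation:
 * .freeze/.thaw reuse gfar_suspend()/gfar_resume() around creation of
 * the hibernation image, while .restore runs after a freshly booted
 * kernel has loaded that image back.  In the .restore case no register
 * contents can be trusted, which is why gfar_restore() re-initializes
 * the BD rings and resets the MAC instead of merely undoing the WoL
 * setup.
 */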
#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);
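/* For reference, an illustrative device tree node this driver would
 * bind to.  The unit address, register window, and PHY details below
 * are board-specific placeholders; only device_type and compatible
 * mirror the gfar_match table above:
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		phy-handle = <&phy0>;
 *	};
 */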