xref: /openbmc/linux/drivers/net/ethernet/freescale/gianfar.c (revision 8eda54c5e6c4eb3f3a9b70fdea278f4e0f8496b2)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
20977f817SJan Ceuleers /* drivers/net/ethernet/freescale/gianfar.c
3ec21e2ecSJeff Kirsher  *
4ec21e2ecSJeff Kirsher  * Gianfar Ethernet Driver
5ec21e2ecSJeff Kirsher  * This driver is designed for the non-CPM ethernet controllers
6ec21e2ecSJeff Kirsher  * on the 85xx and 83xx family of integrated processors
7ec21e2ecSJeff Kirsher  * Based on 8260_io/fcc_enet.c
8ec21e2ecSJeff Kirsher  *
9ec21e2ecSJeff Kirsher  * Author: Andy Fleming
10ec21e2ecSJeff Kirsher  * Maintainer: Kumar Gala
11ec21e2ecSJeff Kirsher  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12ec21e2ecSJeff Kirsher  *
1320862788SClaudiu Manoil  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
14ec21e2ecSJeff Kirsher  * Copyright 2007 MontaVista Software, Inc.
15ec21e2ecSJeff Kirsher  *
16ec21e2ecSJeff Kirsher  *  Gianfar:  AKA Lambda Draconis, "Dragon"
17ec21e2ecSJeff Kirsher  *  RA 11 31 24.2
18ec21e2ecSJeff Kirsher  *  Dec +69 19 52
19ec21e2ecSJeff Kirsher  *  V 3.84
20ec21e2ecSJeff Kirsher  *  B-V +1.62
21ec21e2ecSJeff Kirsher  *
22ec21e2ecSJeff Kirsher  *  Theory of operation
23ec21e2ecSJeff Kirsher  *
24ec21e2ecSJeff Kirsher  *  The driver is initialized through of_device. Configuration information
25ec21e2ecSJeff Kirsher  *  is therefore conveyed through an OF-style device tree.
26ec21e2ecSJeff Kirsher  *
27ec21e2ecSJeff Kirsher  *  The Gianfar Ethernet Controller uses a ring of buffer
28ec21e2ecSJeff Kirsher  *  descriptors.  The beginning is indicated by a register
29ec21e2ecSJeff Kirsher  *  pointing to the physical address of the start of the ring.
30ec21e2ecSJeff Kirsher  *  The end is determined by a "wrap" bit being set in the
31ec21e2ecSJeff Kirsher  *  last descriptor of the ring.
32ec21e2ecSJeff Kirsher  *
33ec21e2ecSJeff Kirsher  *  When a packet is received, the RXF bit in the
34ec21e2ecSJeff Kirsher  *  IEVENT register is set, triggering an interrupt when the
35ec21e2ecSJeff Kirsher  *  corresponding bit in the IMASK register is also set (if
36ec21e2ecSJeff Kirsher  *  interrupt coalescing is active, then the interrupt may not
37ec21e2ecSJeff Kirsher  *  happen immediately, but will wait until either a set number
38ec21e2ecSJeff Kirsher  *  of frames or amount of time have passed).  In NAPI, the
39ec21e2ecSJeff Kirsher  *  interrupt handler will signal there is work to be done, and
40ec21e2ecSJeff Kirsher  *  exit. This method will start at the last known empty
41ec21e2ecSJeff Kirsher  *  descriptor, and process every subsequent descriptor until there
42ec21e2ecSJeff Kirsher  *  are none left with data (NAPI will stop after a set number of
43ec21e2ecSJeff Kirsher  *  packets to give time to other tasks, but will eventually
44ec21e2ecSJeff Kirsher  *  process all the packets).  The data arrives inside a
45ec21e2ecSJeff Kirsher  *  pre-allocated skb, and so after the skb is passed up to the
46ec21e2ecSJeff Kirsher  *  stack, a new skb must be allocated, and the address field in
47ec21e2ecSJeff Kirsher  *  the buffer descriptor must be updated to indicate this new
48ec21e2ecSJeff Kirsher  *  skb.
49ec21e2ecSJeff Kirsher  *
50ec21e2ecSJeff Kirsher  *  When the kernel requests that a packet be transmitted, the
51ec21e2ecSJeff Kirsher  *  driver starts where it left off last time, and points the
52ec21e2ecSJeff Kirsher  *  descriptor at the buffer which was passed in.  The driver
53ec21e2ecSJeff Kirsher  *  then informs the DMA engine that there are packets ready to
54ec21e2ecSJeff Kirsher  *  be transmitted.  Once the controller is finished transmitting
55ec21e2ecSJeff Kirsher  *  the packet, an interrupt may be triggered (under the same
56ec21e2ecSJeff Kirsher  *  conditions as for reception, but depending on the TXF bit).
57ec21e2ecSJeff Kirsher  *  The driver then cleans up the buffer.
58ec21e2ecSJeff Kirsher  */
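
/* Illustrative sketch, not part of this driver: a minimal consumer of the
 * Rx buffer-descriptor ring described above.  The BD_LFLAG()/RXBD_* macros
 * and the rx_queue fields mirror this file; the helper names are
 * hypothetical.
 *
 *	struct rxbd8 *bdp = rx_queue->rx_bd_base;
 *	int work_done = 0;
 *
 *	while (work_done < budget &&
 *	       !(be32_to_cpu(bdp->lstatus) & BD_LFLAG(RXBD_EMPTY))) {
 *		pass_skb_to_stack(bdp);		// hypothetical helper
 *		refill_buffer(bdp);		// new skb, set RXBD_EMPTY again
 *		work_done++;
 *		if (be32_to_cpu(bdp->lstatus) & BD_LFLAG(RXBD_WRAP))
 *			bdp = rx_queue->rx_bd_base;	// wrap bit: back to ring start
 *		else
 *			bdp++;
 *	}
 */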
59ec21e2ecSJeff Kirsher 
60ec21e2ecSJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61ec21e2ecSJeff Kirsher 
62ec21e2ecSJeff Kirsher #include <linux/kernel.h>
63ec21e2ecSJeff Kirsher #include <linux/string.h>
64ec21e2ecSJeff Kirsher #include <linux/errno.h>
65ec21e2ecSJeff Kirsher #include <linux/unistd.h>
66ec21e2ecSJeff Kirsher #include <linux/slab.h>
67ec21e2ecSJeff Kirsher #include <linux/interrupt.h>
68ec21e2ecSJeff Kirsher #include <linux/delay.h>
69ec21e2ecSJeff Kirsher #include <linux/netdevice.h>
70ec21e2ecSJeff Kirsher #include <linux/etherdevice.h>
71ec21e2ecSJeff Kirsher #include <linux/skbuff.h>
72ec21e2ecSJeff Kirsher #include <linux/if_vlan.h>
73ec21e2ecSJeff Kirsher #include <linux/spinlock.h>
74ec21e2ecSJeff Kirsher #include <linux/mm.h>
755af50730SRob Herring #include <linux/of_address.h>
765af50730SRob Herring #include <linux/of_irq.h>
77ec21e2ecSJeff Kirsher #include <linux/of_mdio.h>
78ec21e2ecSJeff Kirsher #include <linux/of_platform.h>
79ec21e2ecSJeff Kirsher #include <linux/ip.h>
80ec21e2ecSJeff Kirsher #include <linux/tcp.h>
81ec21e2ecSJeff Kirsher #include <linux/udp.h>
82ec21e2ecSJeff Kirsher #include <linux/in.h>
83ec21e2ecSJeff Kirsher #include <linux/net_tstamp.h>
84ec21e2ecSJeff Kirsher 
85ec21e2ecSJeff Kirsher #include <asm/io.h>
86d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
87ec21e2ecSJeff Kirsher #include <asm/reg.h>
882969b1f7SClaudiu Manoil #include <asm/mpc85xx.h>
89d6ef0bccSClaudiu Manoil #endif
90ec21e2ecSJeff Kirsher #include <asm/irq.h>
917c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
92ec21e2ecSJeff Kirsher #include <linux/module.h>
93ec21e2ecSJeff Kirsher #include <linux/dma-mapping.h>
94ec21e2ecSJeff Kirsher #include <linux/crc32.h>
95ec21e2ecSJeff Kirsher #include <linux/mii.h>
96ec21e2ecSJeff Kirsher #include <linux/phy.h>
97ec21e2ecSJeff Kirsher #include <linux/phy_fixed.h>
98ec21e2ecSJeff Kirsher #include <linux/of.h>
99ec21e2ecSJeff Kirsher #include <linux/of_net.h>
100ec21e2ecSJeff Kirsher 
101ec21e2ecSJeff Kirsher #include "gianfar.h"
102ec21e2ecSJeff Kirsher 
1038fcc6033SAbhimanyu #define TX_TIMEOUT      (5*HZ)
104ec21e2ecSJeff Kirsher 
105ec21e2ecSJeff Kirsher MODULE_AUTHOR("Freescale Semiconductor, Inc");
106ec21e2ecSJeff Kirsher MODULE_DESCRIPTION("Gianfar Ethernet Driver");
107ec21e2ecSJeff Kirsher MODULE_LICENSE("GPL");
108ec21e2ecSJeff Kirsher 
109ec21e2ecSJeff Kirsher static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
110ec21e2ecSJeff Kirsher 			    dma_addr_t buf)
111ec21e2ecSJeff Kirsher {
112ec21e2ecSJeff Kirsher 	u32 lstatus;
113ec21e2ecSJeff Kirsher 
114a7312d58SClaudiu Manoil 	bdp->bufPtr = cpu_to_be32(buf);
115ec21e2ecSJeff Kirsher 
116ec21e2ecSJeff Kirsher 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
117ec21e2ecSJeff Kirsher 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
118ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(RXBD_WRAP);
119ec21e2ecSJeff Kirsher 
120d55398baSClaudiu Manoil 	gfar_wmb();
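	/* Assumed rationale: the barrier above orders the bufPtr update
	 * before the lstatus write below, so hardware never sees an
	 * "empty" BD that still carries a stale buffer pointer.
	 */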
121ec21e2ecSJeff Kirsher 
122a7312d58SClaudiu Manoil 	bdp->lstatus = cpu_to_be32(lstatus);
123ec21e2ecSJeff Kirsher }
124ec21e2ecSJeff Kirsher 
125ec21e2ecSJeff Kirsher static void gfar_init_tx_rx_base(struct gfar_private *priv)
126ec21e2ecSJeff Kirsher {
127ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
128ec21e2ecSJeff Kirsher 	u32 __iomem *baddr;
129ec21e2ecSJeff Kirsher 	int i;
130ec21e2ecSJeff Kirsher 
131ec21e2ecSJeff Kirsher 	baddr = &regs->tbase0;
132ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
133ec21e2ecSJeff Kirsher 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
134ec21e2ecSJeff Kirsher 		baddr += 2;
135ec21e2ecSJeff Kirsher 	}
136ec21e2ecSJeff Kirsher 
137ec21e2ecSJeff Kirsher 	baddr = &regs->rbase0;
138ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
139ec21e2ecSJeff Kirsher 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
140ec21e2ecSJeff Kirsher 		baddr += 2;
141ec21e2ecSJeff Kirsher 	}
142ec21e2ecSJeff Kirsher }
143ec21e2ecSJeff Kirsher 
14445b679c9SMatei Pavaluca static void gfar_init_rqprm(struct gfar_private *priv)
14545b679c9SMatei Pavaluca {
14645b679c9SMatei Pavaluca 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
14745b679c9SMatei Pavaluca 	u32 __iomem *baddr;
14845b679c9SMatei Pavaluca 	int i;
14945b679c9SMatei Pavaluca 
15045b679c9SMatei Pavaluca 	baddr = &regs->rqprm0;
15145b679c9SMatei Pavaluca 	for (i = 0; i < priv->num_rx_queues; i++) {
15245b679c9SMatei Pavaluca 		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
15345b679c9SMatei Pavaluca 			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
15445b679c9SMatei Pavaluca 		baddr++;
15545b679c9SMatei Pavaluca 	}
15645b679c9SMatei Pavaluca }
15745b679c9SMatei Pavaluca 
15875354148SClaudiu Manoil static void gfar_rx_offload_en(struct gfar_private *priv)
15988302648SClaudiu Manoil {
16088302648SClaudiu Manoil 	/* set this when rx hw offload (TOE) functions are being used */
16188302648SClaudiu Manoil 	priv->uses_rxfcb = 0;
16288302648SClaudiu Manoil 
16388302648SClaudiu Manoil 	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
16488302648SClaudiu Manoil 		priv->uses_rxfcb = 1;
16588302648SClaudiu Manoil 
16615bf176dSClaudiu Manoil 	if (priv->hwts_rx_en || priv->rx_filer_enable)
16788302648SClaudiu Manoil 		priv->uses_rxfcb = 1;
16888302648SClaudiu Manoil }
16988302648SClaudiu Manoil 
170a328ac92SClaudiu Manoil static void gfar_mac_rx_config(struct gfar_private *priv)
171ec21e2ecSJeff Kirsher {
172ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
173ec21e2ecSJeff Kirsher 	u32 rctrl = 0;
174ec21e2ecSJeff Kirsher 
175ec21e2ecSJeff Kirsher 	if (priv->rx_filer_enable) {
17615bf176dSClaudiu Manoil 		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
177ec21e2ecSJeff Kirsher 		/* Program the RIR0 reg with the required distribution */
17871ff9e3dSClaudiu Manoil 		gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
179ec21e2ecSJeff Kirsher 	}
180ec21e2ecSJeff Kirsher 
181f5ae6279SClaudiu Manoil 	/* Restore PROMISC mode */
182a328ac92SClaudiu Manoil 	if (priv->ndev->flags & IFF_PROMISC)
183f5ae6279SClaudiu Manoil 		rctrl |= RCTRL_PROM;
184f5ae6279SClaudiu Manoil 
18588302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_RXCSUM)
186ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_CHECKSUMMING;
187ec21e2ecSJeff Kirsher 
18888302648SClaudiu Manoil 	if (priv->extended_hash)
18988302648SClaudiu Manoil 		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
190ec21e2ecSJeff Kirsher 
191ec21e2ecSJeff Kirsher 	if (priv->padding) {
192ec21e2ecSJeff Kirsher 		rctrl &= ~RCTRL_PAL_MASK;
193ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_PADDING(priv->padding);
194ec21e2ecSJeff Kirsher 	}
195ec21e2ecSJeff Kirsher 
196ec21e2ecSJeff Kirsher 	/* Enable HW time stamping if requested from user space */
19788302648SClaudiu Manoil 	if (priv->hwts_rx_en)
198ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
199ec21e2ecSJeff Kirsher 
20088302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
201ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
202ec21e2ecSJeff Kirsher 
20345b679c9SMatei Pavaluca 	/* Clear the LFC bit */
20445b679c9SMatei Pavaluca 	gfar_write(&regs->rctrl, rctrl);
20545b679c9SMatei Pavaluca 	/* Init flow control threshold values */
20645b679c9SMatei Pavaluca 	gfar_init_rqprm(priv);
20745b679c9SMatei Pavaluca 	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
20845b679c9SMatei Pavaluca 	rctrl |= RCTRL_LFC;
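	/* Assumed rationale for writing rctrl twice: LFC stays cleared while
	 * the per-queue thresholds and the PTV pause value are programmed,
	 * and is only re-enabled by the final rctrl write below.
	 */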
20945b679c9SMatei Pavaluca 
210ec21e2ecSJeff Kirsher 	/* Init rctrl based on our settings */
211ec21e2ecSJeff Kirsher 	gfar_write(&regs->rctrl, rctrl);
212a328ac92SClaudiu Manoil }
213ec21e2ecSJeff Kirsher 
214a328ac92SClaudiu Manoil static void gfar_mac_tx_config(struct gfar_private *priv)
215a328ac92SClaudiu Manoil {
216a328ac92SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
217a328ac92SClaudiu Manoil 	u32 tctrl = 0;
218a328ac92SClaudiu Manoil 
219a328ac92SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_IP_CSUM)
220ec21e2ecSJeff Kirsher 		tctrl |= TCTRL_INIT_CSUM;
221ec21e2ecSJeff Kirsher 
222b98b8babSClaudiu Manoil 	if (priv->prio_sched_en)
223ec21e2ecSJeff Kirsher 		tctrl |= TCTRL_TXSCHED_PRIO;
224b98b8babSClaudiu Manoil 	else {
225b98b8babSClaudiu Manoil 		tctrl |= TCTRL_TXSCHED_WRRS;
226b98b8babSClaudiu Manoil 		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
227b98b8babSClaudiu Manoil 		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
228b98b8babSClaudiu Manoil 	}
229ec21e2ecSJeff Kirsher 
23088302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
23188302648SClaudiu Manoil 		tctrl |= TCTRL_VLINS;
23288302648SClaudiu Manoil 
233ec21e2ecSJeff Kirsher 	gfar_write(&regs->tctrl, tctrl);
234ec21e2ecSJeff Kirsher }
235ec21e2ecSJeff Kirsher 
236f19015baSClaudiu Manoil static void gfar_configure_coalescing(struct gfar_private *priv,
237f19015baSClaudiu Manoil 			       unsigned long tx_mask, unsigned long rx_mask)
238f19015baSClaudiu Manoil {
239f19015baSClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
240f19015baSClaudiu Manoil 	u32 __iomem *baddr;
241f19015baSClaudiu Manoil 
242f19015baSClaudiu Manoil 	if (priv->mode == MQ_MG_MODE) {
243f19015baSClaudiu Manoil 		int i = 0;
244f19015baSClaudiu Manoil 
245f19015baSClaudiu Manoil 		baddr = &regs->txic0;
246f19015baSClaudiu Manoil 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
247f19015baSClaudiu Manoil 			gfar_write(baddr + i, 0);
248f19015baSClaudiu Manoil 			if (likely(priv->tx_queue[i]->txcoalescing))
249f19015baSClaudiu Manoil 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
250f19015baSClaudiu Manoil 		}
251f19015baSClaudiu Manoil 
252f19015baSClaudiu Manoil 		baddr = &regs->rxic0;
253f19015baSClaudiu Manoil 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
254f19015baSClaudiu Manoil 			gfar_write(baddr + i, 0);
255f19015baSClaudiu Manoil 			if (likely(priv->rx_queue[i]->rxcoalescing))
256f19015baSClaudiu Manoil 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
257f19015baSClaudiu Manoil 		}
258f19015baSClaudiu Manoil 	} else {
259f19015baSClaudiu Manoil 		/* Backward compatible case -- even if we enable
260f19015baSClaudiu Manoil 		 * multiple queues, there's only a single reg to program
261f19015baSClaudiu Manoil 		 */
262f19015baSClaudiu Manoil 		gfar_write(&regs->txic, 0);
263f19015baSClaudiu Manoil 		if (likely(priv->tx_queue[0]->txcoalescing))
264f19015baSClaudiu Manoil 			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
265f19015baSClaudiu Manoil 
266f19015baSClaudiu Manoil 		gfar_write(&regs->rxic, 0);
267f19015baSClaudiu Manoil 		if (unlikely(priv->rx_queue[0]->rxcoalescing))
268f19015baSClaudiu Manoil 			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
269f19015baSClaudiu Manoil 	}
270f19015baSClaudiu Manoil }
271f19015baSClaudiu Manoil 
2727ad38784SArseny Solokha static void gfar_configure_coalescing_all(struct gfar_private *priv)
273f19015baSClaudiu Manoil {
274f19015baSClaudiu Manoil 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
275f19015baSClaudiu Manoil }
276f19015baSClaudiu Manoil 
277ec21e2ecSJeff Kirsher static struct net_device_stats *gfar_get_stats(struct net_device *dev)
278ec21e2ecSJeff Kirsher {
279ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
280ec21e2ecSJeff Kirsher 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
281ec21e2ecSJeff Kirsher 	unsigned long tx_packets = 0, tx_bytes = 0;
2823a2e16c8SJan Ceuleers 	int i;
283ec21e2ecSJeff Kirsher 
284ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
285ec21e2ecSJeff Kirsher 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
286ec21e2ecSJeff Kirsher 		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
287ec21e2ecSJeff Kirsher 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
288ec21e2ecSJeff Kirsher 	}
289ec21e2ecSJeff Kirsher 
290ec21e2ecSJeff Kirsher 	dev->stats.rx_packets = rx_packets;
291ec21e2ecSJeff Kirsher 	dev->stats.rx_bytes   = rx_bytes;
292ec21e2ecSJeff Kirsher 	dev->stats.rx_dropped = rx_dropped;
293ec21e2ecSJeff Kirsher 
294ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
295ec21e2ecSJeff Kirsher 		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
296ec21e2ecSJeff Kirsher 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
297ec21e2ecSJeff Kirsher 	}
298ec21e2ecSJeff Kirsher 
299ec21e2ecSJeff Kirsher 	dev->stats.tx_bytes   = tx_bytes;
300ec21e2ecSJeff Kirsher 	dev->stats.tx_packets = tx_packets;
301ec21e2ecSJeff Kirsher 
302ec21e2ecSJeff Kirsher 	return &dev->stats;
303ec21e2ecSJeff Kirsher }
304ec21e2ecSJeff Kirsher 
3057d993c5fSArseny Solokha /* Set the appropriate hash bit for the given addr */
3067d993c5fSArseny Solokha /* The algorithm works like so:
3077d993c5fSArseny Solokha  * 1) Take the Destination Address (i.e. the multicast address), and
3087d993c5fSArseny Solokha  * do a CRC on it (little endian), and reverse the bits of the
3097d993c5fSArseny Solokha  * result.
3107d993c5fSArseny Solokha  * 2) Use the 8 most significant bits as a hash into a 256-entry
3117d993c5fSArseny Solokha  * table.  The table is controlled through 8 32-bit registers:
3127d993c5fSArseny Solokha  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3137d993c5fSArseny Solokha  * entry 255.  This means that the 3 most significant bits of the
3147d993c5fSArseny Solokha  * hash index select which gaddr register to use, and the 5 other bits
3157d993c5fSArseny Solokha  * indicate which bit (assuming an IBM numbering scheme, which
3167d993c5fSArseny Solokha  * for PowerPC (tm) is usually the case) in the register holds
3177d993c5fSArseny Solokha  * the entry.
3187d993c5fSArseny Solokha  */
3197d993c5fSArseny Solokha static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3207d993c5fSArseny Solokha {
3217d993c5fSArseny Solokha 	u32 tempval;
3227d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
3237d993c5fSArseny Solokha 	u32 result = ether_crc(ETH_ALEN, addr);
3247d993c5fSArseny Solokha 	int width = priv->hash_width;
3257d993c5fSArseny Solokha 	u8 whichbit = (result >> (32 - width)) & 0x1f;
3267d993c5fSArseny Solokha 	u8 whichreg = result >> (32 - width + 5);
3277d993c5fSArseny Solokha 	u32 value = (1 << (31-whichbit));
3287d993c5fSArseny Solokha 
3297d993c5fSArseny Solokha 	tempval = gfar_read(priv->hash_regs[whichreg]);
3307d993c5fSArseny Solokha 	tempval |= value;
3317d993c5fSArseny Solokha 	gfar_write(priv->hash_regs[whichreg], tempval);
3327d993c5fSArseny Solokha }
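
/* Worked example (illustrative): with hash_width == 8, a CRC whose top byte
 * is 0xb4 (10110100b) gives whichreg = 0b101 = 5 and whichbit = 0b10100 = 20,
 * so the code above sets 1 << (31 - 20) in gaddr5, i.e. bit 20 in IBM
 * numbering.
 */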
3337d993c5fSArseny Solokha 
3347d993c5fSArseny Solokha /* There are multiple MAC Address register pairs on some controllers.
3357d993c5fSArseny Solokha  * This function sets the numth pair to a given address.
3367d993c5fSArseny Solokha  */
3377d993c5fSArseny Solokha static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3387d993c5fSArseny Solokha 				  const u8 *addr)
3397d993c5fSArseny Solokha {
3407d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
3417d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3427d993c5fSArseny Solokha 	u32 tempval;
3437d993c5fSArseny Solokha 	u32 __iomem *macptr = &regs->macstnaddr1;
3447d993c5fSArseny Solokha 
3457d993c5fSArseny Solokha 	macptr += num*2;
3467d993c5fSArseny Solokha 
3477d993c5fSArseny Solokha 	/* For a station address of 0x12345678ABCD in transmission
3487d993c5fSArseny Solokha 	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3497d993c5fSArseny Solokha 	 * MACnADDR2 is set to 0x34120000.
3507d993c5fSArseny Solokha 	 */
3517d993c5fSArseny Solokha 	tempval = (addr[5] << 24) | (addr[4] << 16) |
3527d993c5fSArseny Solokha 		  (addr[3] << 8)  |  addr[2];
3537d993c5fSArseny Solokha 
3547d993c5fSArseny Solokha 	gfar_write(macptr, tempval);
3557d993c5fSArseny Solokha 
3567d993c5fSArseny Solokha 	tempval = (addr[1] << 24) | (addr[0] << 16);
3577d993c5fSArseny Solokha 
3587d993c5fSArseny Solokha 	gfar_write(macptr+1, tempval);
3597d993c5fSArseny Solokha }
3607d993c5fSArseny Solokha 
3613d23a05cSClaudiu Manoil static int gfar_set_mac_addr(struct net_device *dev, void *p)
3623d23a05cSClaudiu Manoil {
363bff5b625SClaudiu Manoil 	int ret;
364bff5b625SClaudiu Manoil 
365bff5b625SClaudiu Manoil 	ret = eth_mac_addr(dev, p);
366bff5b625SClaudiu Manoil 	if (ret)
367bff5b625SClaudiu Manoil 		return ret;
3683d23a05cSClaudiu Manoil 
3693d23a05cSClaudiu Manoil 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
3703d23a05cSClaudiu Manoil 
3713d23a05cSClaudiu Manoil 	return 0;
3723d23a05cSClaudiu Manoil }
3733d23a05cSClaudiu Manoil 
374efeddce7SClaudiu Manoil static void gfar_ints_disable(struct gfar_private *priv)
375efeddce7SClaudiu Manoil {
376efeddce7SClaudiu Manoil 	int i;
377efeddce7SClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
378efeddce7SClaudiu Manoil 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
379efeddce7SClaudiu Manoil 		/* Clear IEVENT */
380efeddce7SClaudiu Manoil 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
381efeddce7SClaudiu Manoil 
382efeddce7SClaudiu Manoil 		/* Initialize IMASK */
383efeddce7SClaudiu Manoil 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
384efeddce7SClaudiu Manoil 	}
385efeddce7SClaudiu Manoil }
386efeddce7SClaudiu Manoil 
387efeddce7SClaudiu Manoil static void gfar_ints_enable(struct gfar_private *priv)
388efeddce7SClaudiu Manoil {
389efeddce7SClaudiu Manoil 	int i;
390efeddce7SClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
391efeddce7SClaudiu Manoil 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
392efeddce7SClaudiu Manoil 		/* Unmask the interrupts we look for */
393efeddce7SClaudiu Manoil 		gfar_write(&regs->imask, IMASK_DEFAULT);
394efeddce7SClaudiu Manoil 	}
395efeddce7SClaudiu Manoil }
396efeddce7SClaudiu Manoil 
39720862788SClaudiu Manoil static int gfar_alloc_tx_queues(struct gfar_private *priv)
39820862788SClaudiu Manoil {
39920862788SClaudiu Manoil 	int i;
40020862788SClaudiu Manoil 
40120862788SClaudiu Manoil 	for (i = 0; i < priv->num_tx_queues; i++) {
40220862788SClaudiu Manoil 		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
40320862788SClaudiu Manoil 					    GFP_KERNEL);
40420862788SClaudiu Manoil 		if (!priv->tx_queue[i])
40520862788SClaudiu Manoil 			return -ENOMEM;
40620862788SClaudiu Manoil 
40720862788SClaudiu Manoil 		priv->tx_queue[i]->tx_skbuff = NULL;
40820862788SClaudiu Manoil 		priv->tx_queue[i]->qindex = i;
40920862788SClaudiu Manoil 		priv->tx_queue[i]->dev = priv->ndev;
41020862788SClaudiu Manoil 		spin_lock_init(&(priv->tx_queue[i]->txlock));
41120862788SClaudiu Manoil 	}
41220862788SClaudiu Manoil 	return 0;
41320862788SClaudiu Manoil }
41420862788SClaudiu Manoil 
41520862788SClaudiu Manoil static int gfar_alloc_rx_queues(struct gfar_private *priv)
41620862788SClaudiu Manoil {
41720862788SClaudiu Manoil 	int i;
41820862788SClaudiu Manoil 
41920862788SClaudiu Manoil 	for (i = 0; i < priv->num_rx_queues; i++) {
42020862788SClaudiu Manoil 		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
42120862788SClaudiu Manoil 					    GFP_KERNEL);
42220862788SClaudiu Manoil 		if (!priv->rx_queue[i])
42320862788SClaudiu Manoil 			return -ENOMEM;
42420862788SClaudiu Manoil 
42520862788SClaudiu Manoil 		priv->rx_queue[i]->qindex = i;
426f23223f1SClaudiu Manoil 		priv->rx_queue[i]->ndev = priv->ndev;
42720862788SClaudiu Manoil 	}
42820862788SClaudiu Manoil 	return 0;
42920862788SClaudiu Manoil }
43020862788SClaudiu Manoil 
43120862788SClaudiu Manoil static void gfar_free_tx_queues(struct gfar_private *priv)
432ec21e2ecSJeff Kirsher {
4333a2e16c8SJan Ceuleers 	int i;
434ec21e2ecSJeff Kirsher 
435ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++)
436ec21e2ecSJeff Kirsher 		kfree(priv->tx_queue[i]);
437ec21e2ecSJeff Kirsher }
438ec21e2ecSJeff Kirsher 
43920862788SClaudiu Manoil static void gfar_free_rx_queues(struct gfar_private *priv)
440ec21e2ecSJeff Kirsher {
4413a2e16c8SJan Ceuleers 	int i;
442ec21e2ecSJeff Kirsher 
443ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++)
444ec21e2ecSJeff Kirsher 		kfree(priv->rx_queue[i]);
445ec21e2ecSJeff Kirsher }
446ec21e2ecSJeff Kirsher 
447ec21e2ecSJeff Kirsher static void unmap_group_regs(struct gfar_private *priv)
448ec21e2ecSJeff Kirsher {
4493a2e16c8SJan Ceuleers 	int i;
450ec21e2ecSJeff Kirsher 
451ec21e2ecSJeff Kirsher 	for (i = 0; i < MAXGROUPS; i++)
452ec21e2ecSJeff Kirsher 		if (priv->gfargrp[i].regs)
453ec21e2ecSJeff Kirsher 			iounmap(priv->gfargrp[i].regs);
454ec21e2ecSJeff Kirsher }
455ec21e2ecSJeff Kirsher 
456ee873fdaSClaudiu Manoil static void free_gfar_dev(struct gfar_private *priv)
457ee873fdaSClaudiu Manoil {
458ee873fdaSClaudiu Manoil 	int i, j;
459ee873fdaSClaudiu Manoil 
460ee873fdaSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++)
461ee873fdaSClaudiu Manoil 		for (j = 0; j < GFAR_NUM_IRQS; j++) {
462ee873fdaSClaudiu Manoil 			kfree(priv->gfargrp[i].irqinfo[j]);
463ee873fdaSClaudiu Manoil 			priv->gfargrp[i].irqinfo[j] = NULL;
464ee873fdaSClaudiu Manoil 		}
465ee873fdaSClaudiu Manoil 
466ee873fdaSClaudiu Manoil 	free_netdev(priv->ndev);
467ee873fdaSClaudiu Manoil }
468ee873fdaSClaudiu Manoil 
469ec21e2ecSJeff Kirsher static void disable_napi(struct gfar_private *priv)
470ec21e2ecSJeff Kirsher {
4713a2e16c8SJan Ceuleers 	int i;
472ec21e2ecSJeff Kirsher 
473aeb12c5eSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
474aeb12c5eSClaudiu Manoil 		napi_disable(&priv->gfargrp[i].napi_rx);
475aeb12c5eSClaudiu Manoil 		napi_disable(&priv->gfargrp[i].napi_tx);
476aeb12c5eSClaudiu Manoil 	}
477ec21e2ecSJeff Kirsher }
478ec21e2ecSJeff Kirsher 
479ec21e2ecSJeff Kirsher static void enable_napi(struct gfar_private *priv)
480ec21e2ecSJeff Kirsher {
4813a2e16c8SJan Ceuleers 	int i;
482ec21e2ecSJeff Kirsher 
483aeb12c5eSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
484aeb12c5eSClaudiu Manoil 		napi_enable(&priv->gfargrp[i].napi_rx);
485aeb12c5eSClaudiu Manoil 		napi_enable(&priv->gfargrp[i].napi_tx);
486aeb12c5eSClaudiu Manoil 	}
487ec21e2ecSJeff Kirsher }
488ec21e2ecSJeff Kirsher 
489ec21e2ecSJeff Kirsher static int gfar_parse_group(struct device_node *np,
490ec21e2ecSJeff Kirsher 			    struct gfar_private *priv, const char *model)
491ec21e2ecSJeff Kirsher {
4925fedcc14SClaudiu Manoil 	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
493ee873fdaSClaudiu Manoil 	int i;
494ee873fdaSClaudiu Manoil 
495ee873fdaSClaudiu Manoil 	for (i = 0; i < GFAR_NUM_IRQS; i++) {
496ee873fdaSClaudiu Manoil 		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
497ee873fdaSClaudiu Manoil 					  GFP_KERNEL);
498ee873fdaSClaudiu Manoil 		if (!grp->irqinfo[i])
499ee873fdaSClaudiu Manoil 			return -ENOMEM;
500ee873fdaSClaudiu Manoil 	}
501ec21e2ecSJeff Kirsher 
5025fedcc14SClaudiu Manoil 	grp->regs = of_iomap(np, 0);
5035fedcc14SClaudiu Manoil 	if (!grp->regs)
504ec21e2ecSJeff Kirsher 		return -ENOMEM;
505ec21e2ecSJeff Kirsher 
506ee873fdaSClaudiu Manoil 	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
507ec21e2ecSJeff Kirsher 
508ec21e2ecSJeff Kirsher 	/* If we aren't the FEC we have multiple interrupts */
509ec21e2ecSJeff Kirsher 	if (model && strcasecmp(model, "FEC")) {
510ee873fdaSClaudiu Manoil 		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
511ee873fdaSClaudiu Manoil 		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
512fea0f665SMark Brown 		if (!gfar_irq(grp, TX)->irq ||
513fea0f665SMark Brown 		    !gfar_irq(grp, RX)->irq ||
514fea0f665SMark Brown 		    !gfar_irq(grp, ER)->irq)
515ec21e2ecSJeff Kirsher 			return -EINVAL;
516ec21e2ecSJeff Kirsher 	}
517ec21e2ecSJeff Kirsher 
5185fedcc14SClaudiu Manoil 	grp->priv = priv;
5195fedcc14SClaudiu Manoil 	spin_lock_init(&grp->grplock);
520ec21e2ecSJeff Kirsher 	if (priv->mode == MQ_MG_MODE) {
52171ff9e3dSClaudiu Manoil 		/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
52271ff9e3dSClaudiu Manoil 		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
52371ff9e3dSClaudiu Manoil 		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
524ec21e2ecSJeff Kirsher 	} else {
5255fedcc14SClaudiu Manoil 		grp->rx_bit_map = 0xFF;
5265fedcc14SClaudiu Manoil 		grp->tx_bit_map = 0xFF;
527ec21e2ecSJeff Kirsher 	}
52820862788SClaudiu Manoil 
52920862788SClaudiu Manoil 	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit() parses
53020862788SClaudiu Manoil 	 * it right to left, so we need to reverse the 8 bits to get the q index
53120862788SClaudiu Manoil 	 */
53220862788SClaudiu Manoil 	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
53320862788SClaudiu Manoil 	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
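	/* Illustrative example: bitrev8(0x80) == 0x01, so a map whose MSB
	 * marked q0 now exposes q0 as bit 0 to for_each_set_bit() below.
	 */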
53420862788SClaudiu Manoil 
53520862788SClaudiu Manoil 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
53620862788SClaudiu Manoil 	 * and also assign queues to groups
53720862788SClaudiu Manoil 	 */
53820862788SClaudiu Manoil 	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
53971ff9e3dSClaudiu Manoil 		if (!grp->rx_queue)
54071ff9e3dSClaudiu Manoil 			grp->rx_queue = priv->rx_queue[i];
54120862788SClaudiu Manoil 		grp->num_rx_queues++;
54220862788SClaudiu Manoil 		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
54320862788SClaudiu Manoil 		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
54420862788SClaudiu Manoil 		priv->rx_queue[i]->grp = grp;
54520862788SClaudiu Manoil 	}
54620862788SClaudiu Manoil 
54720862788SClaudiu Manoil 	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
54871ff9e3dSClaudiu Manoil 		if (!grp->tx_queue)
54971ff9e3dSClaudiu Manoil 			grp->tx_queue = priv->tx_queue[i];
55020862788SClaudiu Manoil 		grp->num_tx_queues++;
55120862788SClaudiu Manoil 		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
55220862788SClaudiu Manoil 		priv->tqueue |= (TQUEUE_EN0 >> i);
55320862788SClaudiu Manoil 		priv->tx_queue[i]->grp = grp;
55420862788SClaudiu Manoil 	}
55520862788SClaudiu Manoil 
556ec21e2ecSJeff Kirsher 	priv->num_grps++;
557ec21e2ecSJeff Kirsher 
558ec21e2ecSJeff Kirsher 	return 0;
559ec21e2ecSJeff Kirsher }
560ec21e2ecSJeff Kirsher 
561f50724cdSTobias Waldekranz static int gfar_of_group_count(struct device_node *np)
562f50724cdSTobias Waldekranz {
563f50724cdSTobias Waldekranz 	struct device_node *child;
564f50724cdSTobias Waldekranz 	int num = 0;
565f50724cdSTobias Waldekranz 
566f50724cdSTobias Waldekranz 	for_each_available_child_of_node(np, child)
567bf5849f1SRob Herring 		if (of_node_name_eq(child, "queue-group"))
568f50724cdSTobias Waldekranz 			num++;
569f50724cdSTobias Waldekranz 
570f50724cdSTobias Waldekranz 	return num;
571f50724cdSTobias Waldekranz }
572f50724cdSTobias Waldekranz 
5737d993c5fSArseny Solokha /* Reads the controller's registers to determine what interface
5747d993c5fSArseny Solokha  * connects it to the PHY.
5757d993c5fSArseny Solokha  */
5767d993c5fSArseny Solokha static phy_interface_t gfar_get_interface(struct net_device *dev)
5777d993c5fSArseny Solokha {
5787d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
5797d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
5807d993c5fSArseny Solokha 	u32 ecntrl;
5817d993c5fSArseny Solokha 
5827d993c5fSArseny Solokha 	ecntrl = gfar_read(&regs->ecntrl);
5837d993c5fSArseny Solokha 
5847d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_SGMII_MODE)
5857d993c5fSArseny Solokha 		return PHY_INTERFACE_MODE_SGMII;
5867d993c5fSArseny Solokha 
5877d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_TBI_MODE) {
5887d993c5fSArseny Solokha 		if (ecntrl & ECNTRL_REDUCED_MODE)
5897d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RTBI;
5907d993c5fSArseny Solokha 		else
5917d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_TBI;
5927d993c5fSArseny Solokha 	}
5937d993c5fSArseny Solokha 
5947d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_REDUCED_MODE) {
5957d993c5fSArseny Solokha 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
5967d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RMII;
5977d993c5fSArseny Solokha 		}
5987d993c5fSArseny Solokha 		else {
5997d993c5fSArseny Solokha 			phy_interface_t interface = priv->interface;
6007d993c5fSArseny Solokha 
6017d993c5fSArseny Solokha 			/* This isn't autodetected right now, so it must
6027d993c5fSArseny Solokha 			 * be set by the device tree or platform code.
6037d993c5fSArseny Solokha 			 */
6047d993c5fSArseny Solokha 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
6057d993c5fSArseny Solokha 				return PHY_INTERFACE_MODE_RGMII_ID;
6067d993c5fSArseny Solokha 
6077d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RGMII;
6087d993c5fSArseny Solokha 		}
6097d993c5fSArseny Solokha 	}
6107d993c5fSArseny Solokha 
6117d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
6127d993c5fSArseny Solokha 		return PHY_INTERFACE_MODE_GMII;
6137d993c5fSArseny Solokha 
6147d993c5fSArseny Solokha 	return PHY_INTERFACE_MODE_MII;
6157d993c5fSArseny Solokha }
6167d993c5fSArseny Solokha 
617ec21e2ecSJeff Kirsher static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
618ec21e2ecSJeff Kirsher {
619ec21e2ecSJeff Kirsher 	const char *model;
620ec21e2ecSJeff Kirsher 	int err = 0, i;
6210c65b2b9SAndrew Lunn 	phy_interface_t interface;
622ec21e2ecSJeff Kirsher 	struct net_device *dev = NULL;
623ec21e2ecSJeff Kirsher 	struct gfar_private *priv = NULL;
624ec21e2ecSJeff Kirsher 	struct device_node *np = ofdev->dev.of_node;
625ec21e2ecSJeff Kirsher 	struct device_node *child = NULL;
62655917641SJingchang Lu 	u32 stash_len = 0;
62755917641SJingchang Lu 	u32 stash_idx = 0;
628ec21e2ecSJeff Kirsher 	unsigned int num_tx_qs, num_rx_qs;
629*8eda54c5SClaudiu Manoil 	unsigned short mode;
630ec21e2ecSJeff Kirsher 
6314b222ca6SKevin Hao 	if (!np)
632ec21e2ecSJeff Kirsher 		return -ENODEV;
633ec21e2ecSJeff Kirsher 
634*8eda54c5SClaudiu Manoil 	if (of_device_is_compatible(np, "fsl,etsec2"))
635b338ce27SClaudiu Manoil 		mode = MQ_MG_MODE;
636*8eda54c5SClaudiu Manoil 	else
637b338ce27SClaudiu Manoil 		mode = SQ_SG_MODE;
638b338ce27SClaudiu Manoil 
639b338ce27SClaudiu Manoil 	if (mode == SQ_SG_MODE) {
64071ff9e3dSClaudiu Manoil 		num_tx_qs = 1;
64171ff9e3dSClaudiu Manoil 		num_rx_qs = 1;
64271ff9e3dSClaudiu Manoil 	} else { /* MQ_MG_MODE */
643c65d7533SClaudiu Manoil 		/* get the actual number of supported groups */
644f50724cdSTobias Waldekranz 		unsigned int num_grps = gfar_of_group_count(np);
645c65d7533SClaudiu Manoil 
646c65d7533SClaudiu Manoil 		if (num_grps == 0 || num_grps > MAXGROUPS) {
647c65d7533SClaudiu Manoil 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
648c65d7533SClaudiu Manoil 				num_grps);
649c65d7533SClaudiu Manoil 			pr_err("Cannot do alloc_etherdev, aborting\n");
650c65d7533SClaudiu Manoil 			return -EINVAL;
651c65d7533SClaudiu Manoil 		}
652c65d7533SClaudiu Manoil 
653c65d7533SClaudiu Manoil 		num_tx_qs = num_grps; /* one txq per int group */
654c65d7533SClaudiu Manoil 		num_rx_qs = num_grps; /* one rxq per int group */
65571ff9e3dSClaudiu Manoil 	}
656ec21e2ecSJeff Kirsher 
657ec21e2ecSJeff Kirsher 	if (num_tx_qs > MAX_TX_QS) {
658ec21e2ecSJeff Kirsher 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
659ec21e2ecSJeff Kirsher 		       num_tx_qs, MAX_TX_QS);
660ec21e2ecSJeff Kirsher 		pr_err("Cannot do alloc_etherdev, aborting\n");
661ec21e2ecSJeff Kirsher 		return -EINVAL;
662ec21e2ecSJeff Kirsher 	}
663ec21e2ecSJeff Kirsher 
664ec21e2ecSJeff Kirsher 	if (num_rx_qs > MAX_RX_QS) {
665ec21e2ecSJeff Kirsher 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
666ec21e2ecSJeff Kirsher 		       num_rx_qs, MAX_RX_QS);
667ec21e2ecSJeff Kirsher 		pr_err("Cannot do alloc_etherdev, aborting\n");
668ec21e2ecSJeff Kirsher 		return -EINVAL;
669ec21e2ecSJeff Kirsher 	}
670ec21e2ecSJeff Kirsher 
671ec21e2ecSJeff Kirsher 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
672ec21e2ecSJeff Kirsher 	dev = *pdev;
673ec21e2ecSJeff Kirsher 	if (NULL == dev)
674ec21e2ecSJeff Kirsher 		return -ENOMEM;
675ec21e2ecSJeff Kirsher 
676ec21e2ecSJeff Kirsher 	priv = netdev_priv(dev);
677ec21e2ecSJeff Kirsher 	priv->ndev = dev;
678ec21e2ecSJeff Kirsher 
679b338ce27SClaudiu Manoil 	priv->mode = mode;
680b338ce27SClaudiu Manoil 
681ec21e2ecSJeff Kirsher 	priv->num_tx_queues = num_tx_qs;
682ec21e2ecSJeff Kirsher 	netif_set_real_num_rx_queues(dev, num_rx_qs);
683ec21e2ecSJeff Kirsher 	priv->num_rx_queues = num_rx_qs;
68420862788SClaudiu Manoil 
68520862788SClaudiu Manoil 	err = gfar_alloc_tx_queues(priv);
68620862788SClaudiu Manoil 	if (err)
68720862788SClaudiu Manoil 		goto tx_alloc_failed;
68820862788SClaudiu Manoil 
68920862788SClaudiu Manoil 	err = gfar_alloc_rx_queues(priv);
69020862788SClaudiu Manoil 	if (err)
69120862788SClaudiu Manoil 		goto rx_alloc_failed;
692ec21e2ecSJeff Kirsher 
69355917641SJingchang Lu 	err = of_property_read_string(np, "model", &model);
69455917641SJingchang Lu 	if (err) {
69555917641SJingchang Lu 		pr_err("Device model property missing, aborting\n");
69655917641SJingchang Lu 		goto rx_alloc_failed;
69755917641SJingchang Lu 	}
69855917641SJingchang Lu 
699ec21e2ecSJeff Kirsher 	/* Init Rx queue filer rule set linked list */
700ec21e2ecSJeff Kirsher 	INIT_LIST_HEAD(&priv->rx_list.list);
701ec21e2ecSJeff Kirsher 	priv->rx_list.count = 0;
702ec21e2ecSJeff Kirsher 	mutex_init(&priv->rx_queue_access);
703ec21e2ecSJeff Kirsher 
704ec21e2ecSJeff Kirsher 	for (i = 0; i < MAXGROUPS; i++)
705ec21e2ecSJeff Kirsher 		priv->gfargrp[i].regs = NULL;
706ec21e2ecSJeff Kirsher 
707ec21e2ecSJeff Kirsher 	/* Parse and initialize group specific information */
708b338ce27SClaudiu Manoil 	if (priv->mode == MQ_MG_MODE) {
709f50724cdSTobias Waldekranz 		for_each_available_child_of_node(np, child) {
710bf5849f1SRob Herring 			if (!of_node_name_eq(child, "queue-group"))
711f50724cdSTobias Waldekranz 				continue;
712f50724cdSTobias Waldekranz 
713ec21e2ecSJeff Kirsher 			err = gfar_parse_group(child, priv, model);
714989e4da0SSumera Priyadarsini 			if (err) {
715989e4da0SSumera Priyadarsini 				of_node_put(child);
716ec21e2ecSJeff Kirsher 				goto err_grp_init;
717ec21e2ecSJeff Kirsher 			}
718989e4da0SSumera Priyadarsini 		}
719b338ce27SClaudiu Manoil 	} else { /* SQ_SG_MODE */
720ec21e2ecSJeff Kirsher 		err = gfar_parse_group(np, priv, model);
721ec21e2ecSJeff Kirsher 		if (err)
722ec21e2ecSJeff Kirsher 			goto err_grp_init;
723ec21e2ecSJeff Kirsher 	}
724ec21e2ecSJeff Kirsher 
7253f8c0f7eSSaurabh Sengar 	if (of_property_read_bool(np, "bd-stash")) {
726ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
727ec21e2ecSJeff Kirsher 		priv->bd_stash_en = 1;
728ec21e2ecSJeff Kirsher 	}
729ec21e2ecSJeff Kirsher 
73055917641SJingchang Lu 	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
731ec21e2ecSJeff Kirsher 
73255917641SJingchang Lu 	if (err == 0)
73355917641SJingchang Lu 		priv->rx_stash_size = stash_len;
734ec21e2ecSJeff Kirsher 
73555917641SJingchang Lu 	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
736ec21e2ecSJeff Kirsher 
73755917641SJingchang Lu 	if (err == 0)
73855917641SJingchang Lu 		priv->rx_stash_index = stash_idx;
739ec21e2ecSJeff Kirsher 
740ec21e2ecSJeff Kirsher 	if (stash_len || stash_idx)
741ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
742ec21e2ecSJeff Kirsher 
74383216e39SMichael Walle 	err = of_get_mac_address(np, dev->dev_addr);
74483216e39SMichael Walle 	if (err) {
745ff021f22SMaxim Kochetkov 		eth_hw_addr_random(dev);
746ff021f22SMaxim Kochetkov 		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
747ff021f22SMaxim Kochetkov 	}
748ec21e2ecSJeff Kirsher 
749ec21e2ecSJeff Kirsher 	if (model && !strcasecmp(model, "TSEC"))
75034018fd4SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
751ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_COALESCE |
752ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_RMON |
753ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
754bc4598bcSJan Ceuleers 
755ec21e2ecSJeff Kirsher 	if (model && !strcasecmp(model, "eTSEC"))
75634018fd4SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
757ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_COALESCE |
758ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_RMON |
759ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
760ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_CSUM |
761ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_VLAN |
762ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
763ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
7647bff47daSHamish Martin 				     FSL_GIANFAR_DEV_HAS_TIMER |
7657bff47daSHamish Martin 				     FSL_GIANFAR_DEV_HAS_RX_FILER;
766ec21e2ecSJeff Kirsher 
7678e578e73SArseny Solokha 	/* Use PHY connection type from the DT node if one is specified there.
7688e578e73SArseny Solokha 	 * rgmii-id really needs to be specified. Other types can be
7698e578e73SArseny Solokha 	 * detected by hardware
7708e578e73SArseny Solokha 	 */
7710c65b2b9SAndrew Lunn 	err = of_get_phy_mode(np, &interface);
7720c65b2b9SAndrew Lunn 	if (!err)
7730c65b2b9SAndrew Lunn 		priv->interface = interface;
774ec21e2ecSJeff Kirsher 	else
7758e578e73SArseny Solokha 		priv->interface = gfar_get_interface(dev);
776ec21e2ecSJeff Kirsher 
77755917641SJingchang Lu 	if (of_find_property(np, "fsl,magic-packet", NULL))
778ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
779ec21e2ecSJeff Kirsher 
7803e905b80SClaudiu Manoil 	if (of_get_property(np, "fsl,wake-on-filer", NULL))
7813e905b80SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
7823e905b80SClaudiu Manoil 
783ec21e2ecSJeff Kirsher 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
784ec21e2ecSJeff Kirsher 
785be403645SFlorian Fainelli 	/* In the case of a fixed PHY, the DT node associated
786be403645SFlorian Fainelli 	 * with the PHY is the Ethernet MAC DT node.
787be403645SFlorian Fainelli 	 */
7886f2c9bd8SUwe Kleine-König 	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
789be403645SFlorian Fainelli 		err = of_phy_register_fixed_link(np);
790be403645SFlorian Fainelli 		if (err)
791be403645SFlorian Fainelli 			goto err_grp_init;
792be403645SFlorian Fainelli 
7936f2c9bd8SUwe Kleine-König 		priv->phy_node = of_node_get(np);
794be403645SFlorian Fainelli 	}
795be403645SFlorian Fainelli 
796ec21e2ecSJeff Kirsher 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
797ec21e2ecSJeff Kirsher 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
798ec21e2ecSJeff Kirsher 
799ec21e2ecSJeff Kirsher 	return 0;
800ec21e2ecSJeff Kirsher 
801ec21e2ecSJeff Kirsher err_grp_init:
802ec21e2ecSJeff Kirsher 	unmap_group_regs(priv);
80320862788SClaudiu Manoil rx_alloc_failed:
80420862788SClaudiu Manoil 	gfar_free_rx_queues(priv);
80520862788SClaudiu Manoil tx_alloc_failed:
80620862788SClaudiu Manoil 	gfar_free_tx_queues(priv);
807ee873fdaSClaudiu Manoil 	free_gfar_dev(priv);
808ec21e2ecSJeff Kirsher 	return err;
809ec21e2ecSJeff Kirsher }
810ec21e2ecSJeff Kirsher 
811ec21e2ecSJeff Kirsher static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
812ec21e2ecSJeff Kirsher 				   u32 class)
813ec21e2ecSJeff Kirsher {
814ec21e2ecSJeff Kirsher 	u32 rqfpr = FPR_FILER_MASK;
815ec21e2ecSJeff Kirsher 	u32 rqfcr = 0x0;
816ec21e2ecSJeff Kirsher 
817ec21e2ecSJeff Kirsher 	rqfar--;
818ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
819ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
820ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
821ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
822ec21e2ecSJeff Kirsher 
823ec21e2ecSJeff Kirsher 	rqfar--;
824ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_NOMATCH;
825ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
826ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
827ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
828ec21e2ecSJeff Kirsher 
829ec21e2ecSJeff Kirsher 	rqfar--;
830ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
831ec21e2ecSJeff Kirsher 	rqfpr = class;
832ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
833ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
834ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
835ec21e2ecSJeff Kirsher 
836ec21e2ecSJeff Kirsher 	rqfar--;
837ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
838ec21e2ecSJeff Kirsher 	rqfpr = class;
839ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
840ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
841ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
842ec21e2ecSJeff Kirsher 
843ec21e2ecSJeff Kirsher 	return rqfar;
844ec21e2ecSJeff Kirsher }
845ec21e2ecSJeff Kirsher 
846ec21e2ecSJeff Kirsher static void gfar_init_filer_table(struct gfar_private *priv)
847ec21e2ecSJeff Kirsher {
848ec21e2ecSJeff Kirsher 	int i = 0x0;
849ec21e2ecSJeff Kirsher 	u32 rqfar = MAX_FILER_IDX;
850ec21e2ecSJeff Kirsher 	u32 rqfcr = 0x0;
851ec21e2ecSJeff Kirsher 	u32 rqfpr = FPR_FILER_MASK;
852ec21e2ecSJeff Kirsher 
853ec21e2ecSJeff Kirsher 	/* Default rule */
854ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_MATCH;
855ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
856ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
857ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
858ec21e2ecSJeff Kirsher 
859ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
860ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
861ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
862ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
863ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
864ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
865ec21e2ecSJeff Kirsher 
866ec21e2ecSJeff Kirsher 	/* cur_filer_idx indicates the first non-masked rule */
867ec21e2ecSJeff Kirsher 	priv->cur_filer_idx = rqfar;
868ec21e2ecSJeff Kirsher 
869ec21e2ecSJeff Kirsher 	/* Rest are masked rules */
870ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_NOMATCH;
871ec21e2ecSJeff Kirsher 	for (i = 0; i < rqfar; i++) {
872ec21e2ecSJeff Kirsher 		priv->ftp_rqfcr[i] = rqfcr;
873ec21e2ecSJeff Kirsher 		priv->ftp_rqfpr[i] = rqfpr;
874ec21e2ecSJeff Kirsher 		gfar_write_filer(priv, i, rqfcr, rqfpr);
875ec21e2ecSJeff Kirsher 	}
876ec21e2ecSJeff Kirsher }
877ec21e2ecSJeff Kirsher 
878d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
8792969b1f7SClaudiu Manoil static void __gfar_detect_errata_83xx(struct gfar_private *priv)
880ec21e2ecSJeff Kirsher {
881ec21e2ecSJeff Kirsher 	unsigned int pvr = mfspr(SPRN_PVR);
882ec21e2ecSJeff Kirsher 	unsigned int svr = mfspr(SPRN_SVR);
883ec21e2ecSJeff Kirsher 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
884ec21e2ecSJeff Kirsher 	unsigned int rev = svr & 0xffff;
885ec21e2ecSJeff Kirsher 
886ec21e2ecSJeff Kirsher 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
887ec21e2ecSJeff Kirsher 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
888ec21e2ecSJeff Kirsher 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
889ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_74;
890ec21e2ecSJeff Kirsher 
891ec21e2ecSJeff Kirsher 	/* MPC8313 and MPC837x all rev */
892ec21e2ecSJeff Kirsher 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
893ec21e2ecSJeff Kirsher 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
894ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_76;
895ec21e2ecSJeff Kirsher 
8962969b1f7SClaudiu Manoil 	/* MPC8313 Rev < 2.0 */
8972969b1f7SClaudiu Manoil 	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
898ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_12;
8992969b1f7SClaudiu Manoil }
9002969b1f7SClaudiu Manoil 
9012969b1f7SClaudiu Manoil static void __gfar_detect_errata_85xx(struct gfar_private *priv)
9022969b1f7SClaudiu Manoil {
9032969b1f7SClaudiu Manoil 	unsigned int svr = mfspr(SPRN_SVR);
9042969b1f7SClaudiu Manoil 
9052969b1f7SClaudiu Manoil 	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
9062969b1f7SClaudiu Manoil 		priv->errata |= GFAR_ERRATA_12;
9077bfc6082SAtsushi Nemoto 	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
90853fad773SClaudiu Manoil 	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
9097bfc6082SAtsushi Nemoto 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
9107bfc6082SAtsushi Nemoto 	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
91153fad773SClaudiu Manoil 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
9122969b1f7SClaudiu Manoil }
913d6ef0bccSClaudiu Manoil #endif
9142969b1f7SClaudiu Manoil 
9152969b1f7SClaudiu Manoil static void gfar_detect_errata(struct gfar_private *priv)
9162969b1f7SClaudiu Manoil {
9172969b1f7SClaudiu Manoil 	struct device *dev = &priv->ofdev->dev;
9182969b1f7SClaudiu Manoil 
9192969b1f7SClaudiu Manoil 	/* no plans to fix */
9202969b1f7SClaudiu Manoil 	priv->errata |= GFAR_ERRATA_A002;
9212969b1f7SClaudiu Manoil 
922d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
9232969b1f7SClaudiu Manoil 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
9242969b1f7SClaudiu Manoil 		__gfar_detect_errata_85xx(priv);
9252969b1f7SClaudiu Manoil 	else /* non-mpc85xx parts, i.e. e300 core based */
9262969b1f7SClaudiu Manoil 		__gfar_detect_errata_83xx(priv);
927d6ef0bccSClaudiu Manoil #endif
928ec21e2ecSJeff Kirsher 
929ec21e2ecSJeff Kirsher 	if (priv->errata)
930ec21e2ecSJeff Kirsher 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
931ec21e2ecSJeff Kirsher 			 priv->errata);
932ec21e2ecSJeff Kirsher }
933ec21e2ecSJeff Kirsher 
934898157edSXiubo Li static void gfar_init_addr_hash_table(struct gfar_private *priv)
93520862788SClaudiu Manoil {
93620862788SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
937ec21e2ecSJeff Kirsher 
938ec21e2ecSJeff Kirsher 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
939ec21e2ecSJeff Kirsher 		priv->extended_hash = 1;
940ec21e2ecSJeff Kirsher 		priv->hash_width = 9;
941ec21e2ecSJeff Kirsher 
942ec21e2ecSJeff Kirsher 		priv->hash_regs[0] = &regs->igaddr0;
943ec21e2ecSJeff Kirsher 		priv->hash_regs[1] = &regs->igaddr1;
944ec21e2ecSJeff Kirsher 		priv->hash_regs[2] = &regs->igaddr2;
945ec21e2ecSJeff Kirsher 		priv->hash_regs[3] = &regs->igaddr3;
946ec21e2ecSJeff Kirsher 		priv->hash_regs[4] = &regs->igaddr4;
947ec21e2ecSJeff Kirsher 		priv->hash_regs[5] = &regs->igaddr5;
948ec21e2ecSJeff Kirsher 		priv->hash_regs[6] = &regs->igaddr6;
949ec21e2ecSJeff Kirsher 		priv->hash_regs[7] = &regs->igaddr7;
950ec21e2ecSJeff Kirsher 		priv->hash_regs[8] = &regs->gaddr0;
951ec21e2ecSJeff Kirsher 		priv->hash_regs[9] = &regs->gaddr1;
952ec21e2ecSJeff Kirsher 		priv->hash_regs[10] = &regs->gaddr2;
953ec21e2ecSJeff Kirsher 		priv->hash_regs[11] = &regs->gaddr3;
954ec21e2ecSJeff Kirsher 		priv->hash_regs[12] = &regs->gaddr4;
955ec21e2ecSJeff Kirsher 		priv->hash_regs[13] = &regs->gaddr5;
956ec21e2ecSJeff Kirsher 		priv->hash_regs[14] = &regs->gaddr6;
957ec21e2ecSJeff Kirsher 		priv->hash_regs[15] = &regs->gaddr7;
958ec21e2ecSJeff Kirsher 
959ec21e2ecSJeff Kirsher 	} else {
960ec21e2ecSJeff Kirsher 		priv->extended_hash = 0;
961ec21e2ecSJeff Kirsher 		priv->hash_width = 8;
962ec21e2ecSJeff Kirsher 
963ec21e2ecSJeff Kirsher 		priv->hash_regs[0] = &regs->gaddr0;
964ec21e2ecSJeff Kirsher 		priv->hash_regs[1] = &regs->gaddr1;
965ec21e2ecSJeff Kirsher 		priv->hash_regs[2] = &regs->gaddr2;
966ec21e2ecSJeff Kirsher 		priv->hash_regs[3] = &regs->gaddr3;
967ec21e2ecSJeff Kirsher 		priv->hash_regs[4] = &regs->gaddr4;
968ec21e2ecSJeff Kirsher 		priv->hash_regs[5] = &regs->gaddr5;
969ec21e2ecSJeff Kirsher 		priv->hash_regs[6] = &regs->gaddr6;
970ec21e2ecSJeff Kirsher 		priv->hash_regs[7] = &regs->gaddr7;
971ec21e2ecSJeff Kirsher 	}
97220862788SClaudiu Manoil }
97320862788SClaudiu Manoil 
974ec21e2ecSJeff Kirsher static int __gfar_is_rx_idle(struct gfar_private *priv)
975ec21e2ecSJeff Kirsher {
976ec21e2ecSJeff Kirsher 	u32 res;
977ec21e2ecSJeff Kirsher 
9780977f817SJan Ceuleers 	/* Normally TSEC should not hang on GRS commands, so we should
979ec21e2ecSJeff Kirsher 	 * actually wait for the IEVENT_GRSC flag.
980ec21e2ecSJeff Kirsher 	 */
981ad3660c2SClaudiu Manoil 	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
982ec21e2ecSJeff Kirsher 		return 0;
983ec21e2ecSJeff Kirsher 
9840977f817SJan Ceuleers 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
985ec21e2ecSJeff Kirsher 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
986ec21e2ecSJeff Kirsher 	 * and the Rx can be safely reset.
987ec21e2ecSJeff Kirsher 	 */
988ec21e2ecSJeff Kirsher 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
989ec21e2ecSJeff Kirsher 	res &= 0x7f807f80;
990ec21e2ecSJeff Kirsher 	if ((res & 0xffff) == (res >> 16))
991ec21e2ecSJeff Kirsher 		return 1;
992ec21e2ecSJeff Kirsher 
993ec21e2ecSJeff Kirsher 	return 0;
994ec21e2ecSJeff Kirsher }
995ec21e2ecSJeff Kirsher 
996ec21e2ecSJeff Kirsher /* Halt the receive and transmit queues */
997c10650b6SClaudiu Manoil static void gfar_halt_nodisable(struct gfar_private *priv)
998ec21e2ecSJeff Kirsher {
999efeddce7SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1000ec21e2ecSJeff Kirsher 	u32 tempval;
1001a4feee89SClaudiu Manoil 	unsigned int timeout;
1002a4feee89SClaudiu Manoil 	int stopped;
1003ec21e2ecSJeff Kirsher 
1004efeddce7SClaudiu Manoil 	gfar_ints_disable(priv);
1005ec21e2ecSJeff Kirsher 
1006a4feee89SClaudiu Manoil 	if (gfar_is_dma_stopped(priv))
1007a4feee89SClaudiu Manoil 		return;
1008a4feee89SClaudiu Manoil 
1009ec21e2ecSJeff Kirsher 	/* Stop the DMA, and wait for it to stop */
1010ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1011ec21e2ecSJeff Kirsher 	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1012ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
1013ec21e2ecSJeff Kirsher 
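	/* Assumed summary: poll until the graceful Rx/Tx stop completes;
	 * the retry below covers the A002 erratum case that
	 * __gfar_is_rx_idle() checks for.
	 */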
1014a4feee89SClaudiu Manoil retry:
1015a4feee89SClaudiu Manoil 	timeout = 1000;
1016a4feee89SClaudiu Manoil 	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1017a4feee89SClaudiu Manoil 		cpu_relax();
1018a4feee89SClaudiu Manoil 		timeout--;
1019ec21e2ecSJeff Kirsher 	}
1020a4feee89SClaudiu Manoil 
1021a4feee89SClaudiu Manoil 	if (!timeout)
1022a4feee89SClaudiu Manoil 		stopped = gfar_is_dma_stopped(priv);
1023a4feee89SClaudiu Manoil 
1024a4feee89SClaudiu Manoil 	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1025a4feee89SClaudiu Manoil 	    !__gfar_is_rx_idle(priv))
1026a4feee89SClaudiu Manoil 		goto retry;
1027ec21e2ecSJeff Kirsher }
1028ec21e2ecSJeff Kirsher 
1029ec21e2ecSJeff Kirsher /* Halt the receive and transmit queues */
10307ad38784SArseny Solokha static void gfar_halt(struct gfar_private *priv)
1031ec21e2ecSJeff Kirsher {
1032ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1033ec21e2ecSJeff Kirsher 	u32 tempval;
1034ec21e2ecSJeff Kirsher 
1035c10650b6SClaudiu Manoil 	/* Disable the Rx/Tx hw queues */
1036c10650b6SClaudiu Manoil 	gfar_write(&regs->rqueue, 0);
1037c10650b6SClaudiu Manoil 	gfar_write(&regs->tqueue, 0);
1038ec21e2ecSJeff Kirsher 
1039c10650b6SClaudiu Manoil 	mdelay(10);
1040c10650b6SClaudiu Manoil 
1041c10650b6SClaudiu Manoil 	gfar_halt_nodisable(priv);
1042c10650b6SClaudiu Manoil 
1043c10650b6SClaudiu Manoil 	/* Disable Rx/Tx DMA */
1044ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->maccfg1);
1045ec21e2ecSJeff Kirsher 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1046ec21e2ecSJeff Kirsher 	gfar_write(&regs->maccfg1, tempval);
1047ec21e2ecSJeff Kirsher }
1048ec21e2ecSJeff Kirsher 
1049ec21e2ecSJeff Kirsher static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1050ec21e2ecSJeff Kirsher {
1051ec21e2ecSJeff Kirsher 	struct txbd8 *txbdp;
1052ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1053ec21e2ecSJeff Kirsher 	int i, j;
1054ec21e2ecSJeff Kirsher 
1055ec21e2ecSJeff Kirsher 	txbdp = tx_queue->tx_bd_base;
1056ec21e2ecSJeff Kirsher 
1057ec21e2ecSJeff Kirsher 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1058ec21e2ecSJeff Kirsher 		if (!tx_queue->tx_skbuff[i])
1059ec21e2ecSJeff Kirsher 			continue;
1060ec21e2ecSJeff Kirsher 
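		/* Assumed mapping layout, mirroring the unmap calls below:
		 * the first BD carries the skb's linear data (dma_unmap_single),
		 * each following BD carries one page fragment (dma_unmap_page).
		 */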
1061a7312d58SClaudiu Manoil 		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1062a7312d58SClaudiu Manoil 				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1063ec21e2ecSJeff Kirsher 		txbdp->lstatus = 0;
1064ec21e2ecSJeff Kirsher 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1065ec21e2ecSJeff Kirsher 		     j++) {
1066ec21e2ecSJeff Kirsher 			txbdp++;
1067a7312d58SClaudiu Manoil 			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1068a7312d58SClaudiu Manoil 				       be16_to_cpu(txbdp->length),
1069a7312d58SClaudiu Manoil 				       DMA_TO_DEVICE);
1070ec21e2ecSJeff Kirsher 		}
1071ec21e2ecSJeff Kirsher 		txbdp++;
1072ec21e2ecSJeff Kirsher 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1073ec21e2ecSJeff Kirsher 		tx_queue->tx_skbuff[i] = NULL;
1074ec21e2ecSJeff Kirsher 	}
1075ec21e2ecSJeff Kirsher 	kfree(tx_queue->tx_skbuff);
10761eb8f7a7SClaudiu Manoil 	tx_queue->tx_skbuff = NULL;
1077ec21e2ecSJeff Kirsher }
1078ec21e2ecSJeff Kirsher 
1079ec21e2ecSJeff Kirsher static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1080ec21e2ecSJeff Kirsher {
1081ec21e2ecSJeff Kirsher 	int i;
1082ec21e2ecSJeff Kirsher 
108375354148SClaudiu Manoil 	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
108475354148SClaudiu Manoil 
108575354148SClaudiu Manoil 	dev_kfree_skb(rx_queue->skb);
1086ec21e2ecSJeff Kirsher 
1087ec21e2ecSJeff Kirsher 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
108875354148SClaudiu Manoil 		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
108975354148SClaudiu Manoil 
1090ec21e2ecSJeff Kirsher 		rxbdp->lstatus = 0;
1091ec21e2ecSJeff Kirsher 		rxbdp->bufPtr = 0;
1092ec21e2ecSJeff Kirsher 		rxbdp++;
109375354148SClaudiu Manoil 
109475354148SClaudiu Manoil 		if (!rxb->page)
109575354148SClaudiu Manoil 			continue;
109675354148SClaudiu Manoil 
10974af0e5bbSArseny Solokha 		dma_unmap_page(rx_queue->dev, rxb->dma,
109875354148SClaudiu Manoil 			       PAGE_SIZE, DMA_FROM_DEVICE);
109975354148SClaudiu Manoil 		__free_page(rxb->page);
110075354148SClaudiu Manoil 
110175354148SClaudiu Manoil 		rxb->page = NULL;
1102ec21e2ecSJeff Kirsher 	}
110375354148SClaudiu Manoil 
110475354148SClaudiu Manoil 	kfree(rx_queue->rx_buff);
110575354148SClaudiu Manoil 	rx_queue->rx_buff = NULL;
1106ec21e2ecSJeff Kirsher }
1107ec21e2ecSJeff Kirsher 
1108ec21e2ecSJeff Kirsher /* If there are any tx skbs or rx buffers still around, free them.
11090977f817SJan Ceuleers  * Then free the tx_skbuff and rx_buff arrays
11100977f817SJan Ceuleers  */
1111ec21e2ecSJeff Kirsher static void free_skb_resources(struct gfar_private *priv)
1112ec21e2ecSJeff Kirsher {
1113ec21e2ecSJeff Kirsher 	struct gfar_priv_tx_q *tx_queue = NULL;
1114ec21e2ecSJeff Kirsher 	struct gfar_priv_rx_q *rx_queue = NULL;
1115ec21e2ecSJeff Kirsher 	int i;
1116ec21e2ecSJeff Kirsher 
1117ec21e2ecSJeff Kirsher 	/* Go through all the buffer descriptors and free their data buffers */
1118ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
1119d8a0f1b0SPaul Gortmaker 		struct netdev_queue *txq;
1120bc4598bcSJan Ceuleers 
1121ec21e2ecSJeff Kirsher 		tx_queue = priv->tx_queue[i];
1122d8a0f1b0SPaul Gortmaker 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1123ec21e2ecSJeff Kirsher 		if (tx_queue->tx_skbuff)
1124ec21e2ecSJeff Kirsher 			free_skb_tx_queue(tx_queue);
1125d8a0f1b0SPaul Gortmaker 		netdev_tx_reset_queue(txq);
1126ec21e2ecSJeff Kirsher 	}
1127ec21e2ecSJeff Kirsher 
1128ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
1129ec21e2ecSJeff Kirsher 		rx_queue = priv->rx_queue[i];
113075354148SClaudiu Manoil 		if (rx_queue->rx_buff)
1131ec21e2ecSJeff Kirsher 			free_skb_rx_queue(rx_queue);
1132ec21e2ecSJeff Kirsher 	}
1133ec21e2ecSJeff Kirsher 
1134369ec162SClaudiu Manoil 	dma_free_coherent(priv->dev,
1135ec21e2ecSJeff Kirsher 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1136ec21e2ecSJeff Kirsher 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1137ec21e2ecSJeff Kirsher 			  priv->tx_queue[0]->tx_bd_base,
1138ec21e2ecSJeff Kirsher 			  priv->tx_queue[0]->tx_bd_dma_base);
1139ec21e2ecSJeff Kirsher }
1140ec21e2ecSJeff Kirsher 
11417d993c5fSArseny Solokha void stop_gfar(struct net_device *dev)
11427d993c5fSArseny Solokha {
11437d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
11447d993c5fSArseny Solokha 
11457d993c5fSArseny Solokha 	netif_tx_stop_all_queues(dev);
11467d993c5fSArseny Solokha 
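	/* Mark the device as going down before disabling NAPI and halting
	 * DMA; the paired barriers order the GFAR_DOWN update with the
	 * surrounding accesses.
	 */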
11477d993c5fSArseny Solokha 	smp_mb__before_atomic();
11487d993c5fSArseny Solokha 	set_bit(GFAR_DOWN, &priv->state);
11497d993c5fSArseny Solokha 	smp_mb__after_atomic();
11507d993c5fSArseny Solokha 
11517d993c5fSArseny Solokha 	disable_napi(priv);
11527d993c5fSArseny Solokha 
11537d993c5fSArseny Solokha 	/* disable ints and gracefully shut down Rx/Tx DMA */
11547d993c5fSArseny Solokha 	gfar_halt(priv);
11557d993c5fSArseny Solokha 
11567d993c5fSArseny Solokha 	phy_stop(dev->phydev);
11577d993c5fSArseny Solokha 
11587d993c5fSArseny Solokha 	free_skb_resources(priv);
11597d993c5fSArseny Solokha }
11607d993c5fSArseny Solokha 
11617ad38784SArseny Solokha static void gfar_start(struct gfar_private *priv)
1162ec21e2ecSJeff Kirsher {
1163ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1164ec21e2ecSJeff Kirsher 	u32 tempval;
1165ec21e2ecSJeff Kirsher 	int i = 0;
1166ec21e2ecSJeff Kirsher 
1167c10650b6SClaudiu Manoil 	/* Enable Rx/Tx hw queues */
1168c10650b6SClaudiu Manoil 	gfar_write(&regs->rqueue, priv->rqueue);
1169c10650b6SClaudiu Manoil 	gfar_write(&regs->tqueue, priv->tqueue);
1170ec21e2ecSJeff Kirsher 
1171ec21e2ecSJeff Kirsher 	/* Initialize DMACTRL to have WWR and WOP */
1172ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1173ec21e2ecSJeff Kirsher 	tempval |= DMACTRL_INIT_SETTINGS;
1174ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
1175ec21e2ecSJeff Kirsher 
1176ec21e2ecSJeff Kirsher 	/* Make sure we aren't stopped */
1177ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1178ec21e2ecSJeff Kirsher 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1179ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
1180ec21e2ecSJeff Kirsher 
1181ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_grps; i++) {
1182ec21e2ecSJeff Kirsher 		regs = priv->gfargrp[i].regs;
1183ec21e2ecSJeff Kirsher 		/* Clear THLT/RHLT, so that the DMA starts polling now */
1184ec21e2ecSJeff Kirsher 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1185ec21e2ecSJeff Kirsher 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1186ec21e2ecSJeff Kirsher 	}
1187ec21e2ecSJeff Kirsher 
1188c10650b6SClaudiu Manoil 	/* Enable Rx/Tx DMA */
1189c10650b6SClaudiu Manoil 	tempval = gfar_read(&regs->maccfg1);
1190c10650b6SClaudiu Manoil 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1191c10650b6SClaudiu Manoil 	gfar_write(&regs->maccfg1, tempval);
1192c10650b6SClaudiu Manoil 
1193efeddce7SClaudiu Manoil 	gfar_ints_enable(priv);
1194efeddce7SClaudiu Manoil 
1195860e9538SFlorian Westphal 	netif_trans_update(priv->ndev); /* prevent tx timeout */
1196ec21e2ecSJeff Kirsher }
1197ec21e2ecSJeff Kirsher 
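/* Allocate a fresh page for an Rx buffer and DMA-map it for the device.
 * Returns false if the allocation or the mapping fails (the page is
 * released on a mapping error).
 */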
11987d993c5fSArseny Solokha static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
119980ec396cSClaudiu Manoil {
12007d993c5fSArseny Solokha 	struct page *page;
12017d993c5fSArseny Solokha 	dma_addr_t addr;
12027d993c5fSArseny Solokha 
12037d993c5fSArseny Solokha 	page = dev_alloc_page();
12047d993c5fSArseny Solokha 	if (unlikely(!page))
12057d993c5fSArseny Solokha 		return false;
12067d993c5fSArseny Solokha 
12077d993c5fSArseny Solokha 	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
12087d993c5fSArseny Solokha 	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
12097d993c5fSArseny Solokha 		__free_page(page);
12107d993c5fSArseny Solokha 
12117d993c5fSArseny Solokha 		return false;
121280ec396cSClaudiu Manoil 	}
121380ec396cSClaudiu Manoil 
12147d993c5fSArseny Solokha 	rxb->dma = addr;
12157d993c5fSArseny Solokha 	rxb->page = page;
12167d993c5fSArseny Solokha 	rxb->page_offset = 0;
12177d993c5fSArseny Solokha 
12187d993c5fSArseny Solokha 	return true;
12197d993c5fSArseny Solokha }
12207d993c5fSArseny Solokha 
12217d993c5fSArseny Solokha static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1222ec21e2ecSJeff Kirsher {
12237d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
12247d993c5fSArseny Solokha 	struct gfar_extra_stats *estats = &priv->extra_stats;
1225ec21e2ecSJeff Kirsher 
12267d993c5fSArseny Solokha 	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
12277d993c5fSArseny Solokha 	atomic64_inc(&estats->rx_alloc_err);
1228ec21e2ecSJeff Kirsher }
1229ec21e2ecSJeff Kirsher 
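/* Refill up to @alloc_cnt Rx buffer descriptors starting at next_to_use,
 * allocating a new page only for slots that do not already hold one.
 */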
12307d993c5fSArseny Solokha static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
12317d993c5fSArseny Solokha 				int alloc_cnt)
123280ec396cSClaudiu Manoil {
12337d993c5fSArseny Solokha 	struct rxbd8 *bdp;
12347d993c5fSArseny Solokha 	struct gfar_rx_buff *rxb;
123580ec396cSClaudiu Manoil 	int i;
123680ec396cSClaudiu Manoil 
12377d993c5fSArseny Solokha 	i = rx_queue->next_to_use;
12387d993c5fSArseny Solokha 	bdp = &rx_queue->rx_bd_base[i];
12397d993c5fSArseny Solokha 	rxb = &rx_queue->rx_buff[i];
12407d993c5fSArseny Solokha 
12417d993c5fSArseny Solokha 	while (alloc_cnt--) {
12427d993c5fSArseny Solokha 		/* try to reuse the page */
12437d993c5fSArseny Solokha 		if (unlikely(!rxb->page)) {
12447d993c5fSArseny Solokha 			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
12457d993c5fSArseny Solokha 				gfar_rx_alloc_err(rx_queue);
12467d993c5fSArseny Solokha 				break;
124780ec396cSClaudiu Manoil 			}
124880ec396cSClaudiu Manoil 		}
124980ec396cSClaudiu Manoil 
12507d993c5fSArseny Solokha 		/* Setup the new RxBD */
12517d993c5fSArseny Solokha 		gfar_init_rxbdp(rx_queue, bdp,
12527d993c5fSArseny Solokha 				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
12537d993c5fSArseny Solokha 
12547d993c5fSArseny Solokha 		/* Update to the next pointer */
12557d993c5fSArseny Solokha 		bdp++;
12567d993c5fSArseny Solokha 		rxb++;
12577d993c5fSArseny Solokha 
12587d993c5fSArseny Solokha 		if (unlikely(++i == rx_queue->rx_ring_size)) {
12597d993c5fSArseny Solokha 			i = 0;
12607d993c5fSArseny Solokha 			bdp = rx_queue->rx_bd_base;
12617d993c5fSArseny Solokha 			rxb = rx_queue->rx_buff;
12627d993c5fSArseny Solokha 		}
12637d993c5fSArseny Solokha 	}
12647d993c5fSArseny Solokha 
12657d993c5fSArseny Solokha 	rx_queue->next_to_use = i;
12667d993c5fSArseny Solokha 	rx_queue->next_to_alloc = i;
12677d993c5fSArseny Solokha }
12687d993c5fSArseny Solokha 
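/* Reset the Tx descriptor rings to an empty state and (re)populate the Rx
 * rings with mapped buffers, leaving at least one Rx descriptor unused.
 */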
12697d993c5fSArseny Solokha static void gfar_init_bds(struct net_device *ndev)
127080ec396cSClaudiu Manoil {
12717d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(ndev);
12727d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
12737d993c5fSArseny Solokha 	struct gfar_priv_tx_q *tx_queue = NULL;
12747d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
12757d993c5fSArseny Solokha 	struct txbd8 *txbdp;
12767d993c5fSArseny Solokha 	u32 __iomem *rfbptr;
12777d993c5fSArseny Solokha 	int i, j;
127880ec396cSClaudiu Manoil 
12797d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
12807d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
12817d993c5fSArseny Solokha 		/* Initialize some variables in our dev structure */
12827d993c5fSArseny Solokha 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
12837d993c5fSArseny Solokha 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
12847d993c5fSArseny Solokha 		tx_queue->cur_tx = tx_queue->tx_bd_base;
12857d993c5fSArseny Solokha 		tx_queue->skb_curtx = 0;
12867d993c5fSArseny Solokha 		tx_queue->skb_dirtytx = 0;
12877d993c5fSArseny Solokha 
12887d993c5fSArseny Solokha 		/* Initialize Transmit Descriptor Ring */
12897d993c5fSArseny Solokha 		txbdp = tx_queue->tx_bd_base;
12907d993c5fSArseny Solokha 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
12917d993c5fSArseny Solokha 			txbdp->lstatus = 0;
12927d993c5fSArseny Solokha 			txbdp->bufPtr = 0;
12937d993c5fSArseny Solokha 			txbdp++;
12947d993c5fSArseny Solokha 		}
12957d993c5fSArseny Solokha 
12967d993c5fSArseny Solokha 		/* Set the last descriptor in the ring to indicate wrap */
12977d993c5fSArseny Solokha 		txbdp--;
12987d993c5fSArseny Solokha 		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
12997d993c5fSArseny Solokha 					    TXBD_WRAP);
13007d993c5fSArseny Solokha 	}
13017d993c5fSArseny Solokha 
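	/* Point each Rx queue at its last-free-buffer pointer register;
	 * the per-queue rfbptr registers are spaced two 32-bit words apart,
	 * hence the increment by 2 below.
	 */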
13027d993c5fSArseny Solokha 	rfbptr = &regs->rfbptr0;
13037d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
13047d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
13057d993c5fSArseny Solokha 
13067d993c5fSArseny Solokha 		rx_queue->next_to_clean = 0;
13077d993c5fSArseny Solokha 		rx_queue->next_to_use = 0;
13087d993c5fSArseny Solokha 		rx_queue->next_to_alloc = 0;
13097d993c5fSArseny Solokha 
13107d993c5fSArseny Solokha 		/* make sure next_to_clean != next_to_use after this
13117d993c5fSArseny Solokha 		 * by leaving at least 1 unused descriptor
13127d993c5fSArseny Solokha 		 */
13137d993c5fSArseny Solokha 		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
13147d993c5fSArseny Solokha 
13157d993c5fSArseny Solokha 		rx_queue->rfbptr = rfbptr;
13167d993c5fSArseny Solokha 		rfbptr += 2;
131780ec396cSClaudiu Manoil 	}
131880ec396cSClaudiu Manoil }
131980ec396cSClaudiu Manoil 
13207d993c5fSArseny Solokha static int gfar_alloc_skb_resources(struct net_device *ndev)
13217d993c5fSArseny Solokha {
13227d993c5fSArseny Solokha 	void *vaddr;
13237d993c5fSArseny Solokha 	dma_addr_t addr;
13247d993c5fSArseny Solokha 	int i, j;
13257d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(ndev);
13267d993c5fSArseny Solokha 	struct device *dev = priv->dev;
13277d993c5fSArseny Solokha 	struct gfar_priv_tx_q *tx_queue = NULL;
13287d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
13297d993c5fSArseny Solokha 
13307d993c5fSArseny Solokha 	priv->total_tx_ring_size = 0;
13317d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++)
13327d993c5fSArseny Solokha 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
13337d993c5fSArseny Solokha 
13347d993c5fSArseny Solokha 	priv->total_rx_ring_size = 0;
13357d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++)
13367d993c5fSArseny Solokha 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
13377d993c5fSArseny Solokha 
13387d993c5fSArseny Solokha 	/* Allocate memory for the buffer descriptors */
13397d993c5fSArseny Solokha 	vaddr = dma_alloc_coherent(dev,
13407d993c5fSArseny Solokha 				   (priv->total_tx_ring_size *
13417d993c5fSArseny Solokha 				    sizeof(struct txbd8)) +
13427d993c5fSArseny Solokha 				   (priv->total_rx_ring_size *
13437d993c5fSArseny Solokha 				    sizeof(struct rxbd8)),
13447d993c5fSArseny Solokha 				   &addr, GFP_KERNEL);
13457d993c5fSArseny Solokha 	if (!vaddr)
13467d993c5fSArseny Solokha 		return -ENOMEM;
13477d993c5fSArseny Solokha 
13487d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
13497d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
13507d993c5fSArseny Solokha 		tx_queue->tx_bd_base = vaddr;
13517d993c5fSArseny Solokha 		tx_queue->tx_bd_dma_base = addr;
13527d993c5fSArseny Solokha 		tx_queue->dev = ndev;
13537d993c5fSArseny Solokha 		/* enet DMA only understands physical addresses */
13547d993c5fSArseny Solokha 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
13557d993c5fSArseny Solokha 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
13567d993c5fSArseny Solokha 	}
13577d993c5fSArseny Solokha 
13587d993c5fSArseny Solokha 	/* Start the rx descriptor ring where the tx ring leaves off */
13597d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
13607d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
13617d993c5fSArseny Solokha 		rx_queue->rx_bd_base = vaddr;
13627d993c5fSArseny Solokha 		rx_queue->rx_bd_dma_base = addr;
13637d993c5fSArseny Solokha 		rx_queue->ndev = ndev;
13647d993c5fSArseny Solokha 		rx_queue->dev = dev;
13657d993c5fSArseny Solokha 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
13667d993c5fSArseny Solokha 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
13677d993c5fSArseny Solokha 	}
13687d993c5fSArseny Solokha 
13697d993c5fSArseny Solokha 	/* Setup the skbuff rings */
13707d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
13717d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
13727d993c5fSArseny Solokha 		tx_queue->tx_skbuff =
13737d993c5fSArseny Solokha 			kmalloc_array(tx_queue->tx_ring_size,
13747d993c5fSArseny Solokha 				      sizeof(*tx_queue->tx_skbuff),
13757d993c5fSArseny Solokha 				      GFP_KERNEL);
13767d993c5fSArseny Solokha 		if (!tx_queue->tx_skbuff)
13777d993c5fSArseny Solokha 			goto cleanup;
13787d993c5fSArseny Solokha 
13797d993c5fSArseny Solokha 		for (j = 0; j < tx_queue->tx_ring_size; j++)
13807d993c5fSArseny Solokha 			tx_queue->tx_skbuff[j] = NULL;
13817d993c5fSArseny Solokha 	}
13827d993c5fSArseny Solokha 
13837d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
13847d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
13857d993c5fSArseny Solokha 		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
13867d993c5fSArseny Solokha 					    sizeof(*rx_queue->rx_buff),
13877d993c5fSArseny Solokha 					    GFP_KERNEL);
13887d993c5fSArseny Solokha 		if (!rx_queue->rx_buff)
13897d993c5fSArseny Solokha 			goto cleanup;
13907d993c5fSArseny Solokha 	}
13917d993c5fSArseny Solokha 
13927d993c5fSArseny Solokha 	gfar_init_bds(ndev);
13937d993c5fSArseny Solokha 
139480ec396cSClaudiu Manoil 	return 0;
13957d993c5fSArseny Solokha 
13967d993c5fSArseny Solokha cleanup:
13977d993c5fSArseny Solokha 	free_skb_resources(priv);
13987d993c5fSArseny Solokha 	return -ENOMEM;
139980ec396cSClaudiu Manoil }
140080ec396cSClaudiu Manoil 
1401ec21e2ecSJeff Kirsher /* Bring the controller up and running */
1402ec21e2ecSJeff Kirsher int startup_gfar(struct net_device *ndev)
1403ec21e2ecSJeff Kirsher {
1404ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(ndev);
140580ec396cSClaudiu Manoil 	int err;
1406ec21e2ecSJeff Kirsher 
1407a328ac92SClaudiu Manoil 	gfar_mac_reset(priv);
1408ec21e2ecSJeff Kirsher 
1409ec21e2ecSJeff Kirsher 	err = gfar_alloc_skb_resources(ndev);
1410ec21e2ecSJeff Kirsher 	if (err)
1411ec21e2ecSJeff Kirsher 		return err;
1412ec21e2ecSJeff Kirsher 
1413a328ac92SClaudiu Manoil 	gfar_init_tx_rx_base(priv);
1414ec21e2ecSJeff Kirsher 
14154e857c58SPeter Zijlstra 	smp_mb__before_atomic();
14160851133bSClaudiu Manoil 	clear_bit(GFAR_DOWN, &priv->state);
14174e857c58SPeter Zijlstra 	smp_mb__after_atomic();
14180851133bSClaudiu Manoil 
14190851133bSClaudiu Manoil 	/* Start Rx/Tx DMA and enable the interrupts */
1420c10650b6SClaudiu Manoil 	gfar_start(priv);
1421ec21e2ecSJeff Kirsher 
14222a4eebf0SClaudiu Manoil 	/* force link state update after mac reset */
14232a4eebf0SClaudiu Manoil 	priv->oldlink = 0;
14242a4eebf0SClaudiu Manoil 	priv->oldspeed = 0;
14252a4eebf0SClaudiu Manoil 	priv->oldduplex = -1;
14262a4eebf0SClaudiu Manoil 
14274c4a6b0eSPhilippe Reynes 	phy_start(ndev->phydev);
1428ec21e2ecSJeff Kirsher 
14290851133bSClaudiu Manoil 	enable_napi(priv);
14300851133bSClaudiu Manoil 
14310851133bSClaudiu Manoil 	netif_tx_wake_all_queues(ndev);
14320851133bSClaudiu Manoil 
1433ec21e2ecSJeff Kirsher 	return 0;
1434ec21e2ecSJeff Kirsher }
1435ec21e2ecSJeff Kirsher 
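/* Compute the MACCFG1 Rx/Tx flow control bits, either from the user-forced
 * pause settings or from the result of pause autonegotiation with the link
 * partner.
 */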
14367d993c5fSArseny Solokha static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
14377d993c5fSArseny Solokha {
14387d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
14397d993c5fSArseny Solokha 	struct phy_device *phydev = ndev->phydev;
14407d993c5fSArseny Solokha 	u32 val = 0;
14417d993c5fSArseny Solokha 
14427d993c5fSArseny Solokha 	if (!phydev->duplex)
14437d993c5fSArseny Solokha 		return val;
14447d993c5fSArseny Solokha 
14457d993c5fSArseny Solokha 	if (!priv->pause_aneg_en) {
14467d993c5fSArseny Solokha 		if (priv->tx_pause_en)
14477d993c5fSArseny Solokha 			val |= MACCFG1_TX_FLOW;
14487d993c5fSArseny Solokha 		if (priv->rx_pause_en)
14497d993c5fSArseny Solokha 			val |= MACCFG1_RX_FLOW;
14507d993c5fSArseny Solokha 	} else {
14517d993c5fSArseny Solokha 		u16 lcl_adv, rmt_adv;
14527d993c5fSArseny Solokha 		u8 flowctrl;
14537d993c5fSArseny Solokha 		/* get link partner capabilities */
14547d993c5fSArseny Solokha 		rmt_adv = 0;
14557d993c5fSArseny Solokha 		if (phydev->pause)
14567d993c5fSArseny Solokha 			rmt_adv = LPA_PAUSE_CAP;
14577d993c5fSArseny Solokha 		if (phydev->asym_pause)
14587d993c5fSArseny Solokha 			rmt_adv |= LPA_PAUSE_ASYM;
14597d993c5fSArseny Solokha 
14607d993c5fSArseny Solokha 		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
14617d993c5fSArseny Solokha 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
14627d993c5fSArseny Solokha 		if (flowctrl & FLOW_CTRL_TX)
14637d993c5fSArseny Solokha 			val |= MACCFG1_TX_FLOW;
14647d993c5fSArseny Solokha 		if (flowctrl & FLOW_CTRL_RX)
14657d993c5fSArseny Solokha 			val |= MACCFG1_RX_FLOW;
14667d993c5fSArseny Solokha 	}
14677d993c5fSArseny Solokha 
14687d993c5fSArseny Solokha 	return val;
14697d993c5fSArseny Solokha }
14707d993c5fSArseny Solokha 
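/* Reprogram MACCFG1/MACCFG2/ECNTRL to match the PHY's current link state
 * (speed, duplex, pause) and record the new state in priv->old*.
 */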
14717d993c5fSArseny Solokha static noinline void gfar_update_link_state(struct gfar_private *priv)
14727d993c5fSArseny Solokha {
14737d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
14747d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
14757d993c5fSArseny Solokha 	struct phy_device *phydev = ndev->phydev;
14767d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
14777d993c5fSArseny Solokha 	int i;
14787d993c5fSArseny Solokha 
14797d993c5fSArseny Solokha 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
14807d993c5fSArseny Solokha 		return;
14817d993c5fSArseny Solokha 
14827d993c5fSArseny Solokha 	if (phydev->link) {
14837d993c5fSArseny Solokha 		u32 tempval1 = gfar_read(&regs->maccfg1);
14847d993c5fSArseny Solokha 		u32 tempval = gfar_read(&regs->maccfg2);
14857d993c5fSArseny Solokha 		u32 ecntrl = gfar_read(&regs->ecntrl);
14867d993c5fSArseny Solokha 		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
14877d993c5fSArseny Solokha 
14887d993c5fSArseny Solokha 		if (phydev->duplex != priv->oldduplex) {
14897d993c5fSArseny Solokha 			if (!(phydev->duplex))
14907d993c5fSArseny Solokha 				tempval &= ~(MACCFG2_FULL_DUPLEX);
14917d993c5fSArseny Solokha 			else
14927d993c5fSArseny Solokha 				tempval |= MACCFG2_FULL_DUPLEX;
14937d993c5fSArseny Solokha 
14947d993c5fSArseny Solokha 			priv->oldduplex = phydev->duplex;
14957d993c5fSArseny Solokha 		}
14967d993c5fSArseny Solokha 
14977d993c5fSArseny Solokha 		if (phydev->speed != priv->oldspeed) {
14987d993c5fSArseny Solokha 			switch (phydev->speed) {
14997d993c5fSArseny Solokha 			case 1000:
15007d993c5fSArseny Solokha 				tempval =
15017d993c5fSArseny Solokha 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
15027d993c5fSArseny Solokha 
15037d993c5fSArseny Solokha 				ecntrl &= ~(ECNTRL_R100);
15047d993c5fSArseny Solokha 				break;
15057d993c5fSArseny Solokha 			case 100:
15067d993c5fSArseny Solokha 			case 10:
15077d993c5fSArseny Solokha 				tempval =
15087d993c5fSArseny Solokha 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
15097d993c5fSArseny Solokha 
15107d993c5fSArseny Solokha 				/* Reduced mode distinguishes
15117d993c5fSArseny Solokha 				 * between 10 and 100
15120977f817SJan Ceuleers 				 */
15137d993c5fSArseny Solokha 				if (phydev->speed == SPEED_100)
15147d993c5fSArseny Solokha 					ecntrl |= ECNTRL_R100;
15157d993c5fSArseny Solokha 				else
15167d993c5fSArseny Solokha 					ecntrl &= ~(ECNTRL_R100);
15177d993c5fSArseny Solokha 				break;
15187d993c5fSArseny Solokha 			default:
15197d993c5fSArseny Solokha 				netif_warn(priv, link, priv->ndev,
15207d993c5fSArseny Solokha 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
15217d993c5fSArseny Solokha 					   phydev->speed);
15227d993c5fSArseny Solokha 				break;
15237d993c5fSArseny Solokha 			}
15247d993c5fSArseny Solokha 
15257d993c5fSArseny Solokha 			priv->oldspeed = phydev->speed;
15267d993c5fSArseny Solokha 		}
15277d993c5fSArseny Solokha 
15287d993c5fSArseny Solokha 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
15297d993c5fSArseny Solokha 		tempval1 |= gfar_get_flowctrl_cfg(priv);
15307d993c5fSArseny Solokha 
15317d993c5fSArseny Solokha 		/* Turn last free buffer recording on */
15327d993c5fSArseny Solokha 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
15337d993c5fSArseny Solokha 			for (i = 0; i < priv->num_rx_queues; i++) {
15347d993c5fSArseny Solokha 				u32 bdp_dma;
15357d993c5fSArseny Solokha 
15367d993c5fSArseny Solokha 				rx_queue = priv->rx_queue[i];
15377d993c5fSArseny Solokha 				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
15387d993c5fSArseny Solokha 				gfar_write(rx_queue->rfbptr, bdp_dma);
15397d993c5fSArseny Solokha 			}
15407d993c5fSArseny Solokha 
15417d993c5fSArseny Solokha 			priv->tx_actual_en = 1;
15427d993c5fSArseny Solokha 		}
15437d993c5fSArseny Solokha 
15447d993c5fSArseny Solokha 		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
15457d993c5fSArseny Solokha 			priv->tx_actual_en = 0;
15467d993c5fSArseny Solokha 
15477d993c5fSArseny Solokha 		gfar_write(&regs->maccfg1, tempval1);
15487d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
15497d993c5fSArseny Solokha 		gfar_write(&regs->ecntrl, ecntrl);
15507d993c5fSArseny Solokha 
15517d993c5fSArseny Solokha 		if (!priv->oldlink)
15527d993c5fSArseny Solokha 			priv->oldlink = 1;
15537d993c5fSArseny Solokha 
15547d993c5fSArseny Solokha 	} else if (priv->oldlink) {
15557d993c5fSArseny Solokha 		priv->oldlink = 0;
15567d993c5fSArseny Solokha 		priv->oldspeed = 0;
15577d993c5fSArseny Solokha 		priv->oldduplex = -1;
15587d993c5fSArseny Solokha 	}
15597d993c5fSArseny Solokha 
15607d993c5fSArseny Solokha 	if (netif_msg_link(priv))
15617d993c5fSArseny Solokha 		phy_print_status(phydev);
15627d993c5fSArseny Solokha }
15637d993c5fSArseny Solokha 
15647d993c5fSArseny Solokha /* Called every time the controller might need to be made
15657d993c5fSArseny Solokha  * aware of new link state.  The PHY code conveys this
15667d993c5fSArseny Solokha  * information through variables in the phydev structure, and this
15677d993c5fSArseny Solokha  * function converts those variables into the appropriate
15687d993c5fSArseny Solokha  * register values, and can bring down the device if needed.
15697d993c5fSArseny Solokha  */
15707d993c5fSArseny Solokha static void adjust_link(struct net_device *dev)
1571ec21e2ecSJeff Kirsher {
1572ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
15737d993c5fSArseny Solokha 	struct phy_device *phydev = dev->phydev;
1574ec21e2ecSJeff Kirsher 
15757d993c5fSArseny Solokha 	if (unlikely(phydev->link != priv->oldlink ||
15767d993c5fSArseny Solokha 		     (phydev->link && (phydev->duplex != priv->oldduplex ||
15777d993c5fSArseny Solokha 				       phydev->speed != priv->oldspeed))))
15787d993c5fSArseny Solokha 		gfar_update_link_state(priv);
15797d993c5fSArseny Solokha }
1580ec21e2ecSJeff Kirsher 
15817d993c5fSArseny Solokha /* Initialize TBI PHY interface for communicating with the
15827d993c5fSArseny Solokha  * SERDES lynx PHY on the chip.  We communicate with this PHY
15837d993c5fSArseny Solokha  * through the MDIO bus on each controller, treating it as a
15847d993c5fSArseny Solokha  * "normal" PHY at the address found in the TBIPA register.  We assume
15857d993c5fSArseny Solokha  * that the TBIPA register is valid.  Either the MDIO bus code will set
15867d993c5fSArseny Solokha  * it to a value that doesn't conflict with other PHYs on the bus, or the
15877d993c5fSArseny Solokha  * value doesn't matter, as there are no other PHYs on the bus.
15887d993c5fSArseny Solokha  */
15897d993c5fSArseny Solokha static void gfar_configure_serdes(struct net_device *dev)
15907d993c5fSArseny Solokha {
15917d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
15927d993c5fSArseny Solokha 	struct phy_device *tbiphy;
159380ec396cSClaudiu Manoil 
15947d993c5fSArseny Solokha 	if (!priv->tbi_node) {
15957d993c5fSArseny Solokha 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
15967d993c5fSArseny Solokha 				    "device tree specify a tbi-handle\n");
15977d993c5fSArseny Solokha 		return;
15987d993c5fSArseny Solokha 	}
1599ec21e2ecSJeff Kirsher 
16007d993c5fSArseny Solokha 	tbiphy = of_phy_find_device(priv->tbi_node);
16017d993c5fSArseny Solokha 	if (!tbiphy) {
16027d993c5fSArseny Solokha 		dev_err(&dev->dev, "error: Could not get TBI device\n");
16037d993c5fSArseny Solokha 		return;
16047d993c5fSArseny Solokha 	}
16057d993c5fSArseny Solokha 
16067d993c5fSArseny Solokha 	/* If the link is already up, we must already be ok, and don't need to
16077d993c5fSArseny Solokha 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
16087d993c5fSArseny Solokha 	 * everything for us?  Resetting it takes the link down and requires
16097d993c5fSArseny Solokha 	 * several seconds for it to come back.
16107d993c5fSArseny Solokha 	 */
16117d993c5fSArseny Solokha 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
16127d993c5fSArseny Solokha 		put_device(&tbiphy->mdio.dev);
16137d993c5fSArseny Solokha 		return;
16147d993c5fSArseny Solokha 	}
16157d993c5fSArseny Solokha 
16167d993c5fSArseny Solokha 	/* Single clk mode, mii mode off (for serdes communication) */
16177d993c5fSArseny Solokha 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
16187d993c5fSArseny Solokha 
16197d993c5fSArseny Solokha 	phy_write(tbiphy, MII_ADVERTISE,
16207d993c5fSArseny Solokha 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
16217d993c5fSArseny Solokha 		  ADVERTISE_1000XPSE_ASYM);
16227d993c5fSArseny Solokha 
16237d993c5fSArseny Solokha 	phy_write(tbiphy, MII_BMCR,
16247d993c5fSArseny Solokha 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
16257d993c5fSArseny Solokha 		  BMCR_SPEED1000);
16267d993c5fSArseny Solokha 
16277d993c5fSArseny Solokha 	put_device(&tbiphy->mdio.dev);
16287d993c5fSArseny Solokha }
16297d993c5fSArseny Solokha 
16307d993c5fSArseny Solokha /* Initializes driver's PHY state, and attaches to the PHY.
16317d993c5fSArseny Solokha  * Returns 0 on success.
16327d993c5fSArseny Solokha  */
16337d993c5fSArseny Solokha static int init_phy(struct net_device *dev)
16347d993c5fSArseny Solokha {
16357d993c5fSArseny Solokha 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
16367d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
16378e578e73SArseny Solokha 	phy_interface_t interface = priv->interface;
16387d993c5fSArseny Solokha 	struct phy_device *phydev;
16397d993c5fSArseny Solokha 	struct ethtool_eee edata;
16407d993c5fSArseny Solokha 
16417d993c5fSArseny Solokha 	linkmode_set_bit_array(phy_10_100_features_array,
16427d993c5fSArseny Solokha 			       ARRAY_SIZE(phy_10_100_features_array),
16437d993c5fSArseny Solokha 			       mask);
16447d993c5fSArseny Solokha 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
16457d993c5fSArseny Solokha 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
16467d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
16477d993c5fSArseny Solokha 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
16487d993c5fSArseny Solokha 
16497d993c5fSArseny Solokha 	priv->oldlink = 0;
16507d993c5fSArseny Solokha 	priv->oldspeed = 0;
16517d993c5fSArseny Solokha 	priv->oldduplex = -1;
16527d993c5fSArseny Solokha 
16537d993c5fSArseny Solokha 	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
16547d993c5fSArseny Solokha 				interface);
16557d993c5fSArseny Solokha 	if (!phydev) {
16567d993c5fSArseny Solokha 		dev_err(&dev->dev, "could not attach to PHY\n");
16577d993c5fSArseny Solokha 		return -ENODEV;
16587d993c5fSArseny Solokha 	}
16597d993c5fSArseny Solokha 
16607d993c5fSArseny Solokha 	if (interface == PHY_INTERFACE_MODE_SGMII)
16617d993c5fSArseny Solokha 		gfar_configure_serdes(dev);
16627d993c5fSArseny Solokha 
16637d993c5fSArseny Solokha 	/* Remove any features not supported by the controller */
16647d993c5fSArseny Solokha 	linkmode_and(phydev->supported, phydev->supported, mask);
16657d993c5fSArseny Solokha 	linkmode_copy(phydev->advertising, phydev->supported);
16667d993c5fSArseny Solokha 
16677d993c5fSArseny Solokha 	/* Add support for flow control */
16687d993c5fSArseny Solokha 	phy_support_asym_pause(phydev);
16697d993c5fSArseny Solokha 
16707d993c5fSArseny Solokha 	/* disable EEE autoneg, EEE not supported by eTSEC */
16717d993c5fSArseny Solokha 	memset(&edata, 0, sizeof(struct ethtool_eee));
16727d993c5fSArseny Solokha 	phy_ethtool_set_eee(phydev, &edata);
16737d993c5fSArseny Solokha 
16747d993c5fSArseny Solokha 	return 0;
1675ec21e2ecSJeff Kirsher }
1676ec21e2ecSJeff Kirsher 
1677ec21e2ecSJeff Kirsher static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1678ec21e2ecSJeff Kirsher {
1679d58ff351SJohannes Berg 	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1680ec21e2ecSJeff Kirsher 
1681ec21e2ecSJeff Kirsher 	memset(fcb, 0, GMAC_FCB_LEN);
1682ec21e2ecSJeff Kirsher 
1683ec21e2ecSJeff Kirsher 	return fcb;
1684ec21e2ecSJeff Kirsher }
1685ec21e2ecSJeff Kirsher 
16869c4886e5SManfred Rudigier static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
16879c4886e5SManfred Rudigier 				    int fcb_length)
1688ec21e2ecSJeff Kirsher {
1689ec21e2ecSJeff Kirsher 	/* If we're here, it's an IP packet with a TCP or UDP
1690ec21e2ecSJeff Kirsher 	 * payload.  We set it to checksum, using a pseudo-header
1691ec21e2ecSJeff Kirsher 	 * we provide
1692ec21e2ecSJeff Kirsher 	 */
16933a2e16c8SJan Ceuleers 	u8 flags = TXFCB_DEFAULT;
1694ec21e2ecSJeff Kirsher 
16950977f817SJan Ceuleers 	/* Tell the controller what the protocol is,
16960977f817SJan Ceuleers 	 * and provide the already-calculated phcs
16970977f817SJan Ceuleers 	 */
1698ec21e2ecSJeff Kirsher 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1699ec21e2ecSJeff Kirsher 		flags |= TXFCB_UDP;
170026eb9374SClaudiu Manoil 		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1701ec21e2ecSJeff Kirsher 	} else
170226eb9374SClaudiu Manoil 		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1703ec21e2ecSJeff Kirsher 
1704ec21e2ecSJeff Kirsher 	/* l3os is the distance between the start of the
1705ec21e2ecSJeff Kirsher 	 * frame (skb->data) and the start of the IP hdr.
1706ec21e2ecSJeff Kirsher 	 * l4os is the distance between the start of the
17070977f817SJan Ceuleers 	 * l3 hdr and the l4 hdr
17080977f817SJan Ceuleers 	 */
170926eb9374SClaudiu Manoil 	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1710ec21e2ecSJeff Kirsher 	fcb->l4os = skb_network_header_len(skb);
1711ec21e2ecSJeff Kirsher 
1712ec21e2ecSJeff Kirsher 	fcb->flags = flags;
1713ec21e2ecSJeff Kirsher }
1714ec21e2ecSJeff Kirsher 
1715278af574SArnd Bergmann static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1716ec21e2ecSJeff Kirsher {
1717ec21e2ecSJeff Kirsher 	fcb->flags |= TXFCB_VLN;
171826eb9374SClaudiu Manoil 	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1719ec21e2ecSJeff Kirsher }
1720ec21e2ecSJeff Kirsher 
1721ec21e2ecSJeff Kirsher static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1722ec21e2ecSJeff Kirsher 				      struct txbd8 *base, int ring_size)
1723ec21e2ecSJeff Kirsher {
1724ec21e2ecSJeff Kirsher 	struct txbd8 *new_bd = bdp + stride;
1725ec21e2ecSJeff Kirsher 
1726ec21e2ecSJeff Kirsher 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1727ec21e2ecSJeff Kirsher }
1728ec21e2ecSJeff Kirsher 
1729ec21e2ecSJeff Kirsher static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1730ec21e2ecSJeff Kirsher 				      int ring_size)
1731ec21e2ecSJeff Kirsher {
1732ec21e2ecSJeff Kirsher 	return skip_txbd(bdp, 1, base, ring_size);
1733ec21e2ecSJeff Kirsher }
1734ec21e2ecSJeff Kirsher 
173502d88fb4SClaudiu Manoil /* eTSEC12: csum generation not supported for some fcb offsets */
173602d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_12(struct gfar_private *priv,
173702d88fb4SClaudiu Manoil 				       unsigned long fcb_addr)
173802d88fb4SClaudiu Manoil {
173902d88fb4SClaudiu Manoil 	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
174002d88fb4SClaudiu Manoil 	       (fcb_addr % 0x20) > 0x18);
174102d88fb4SClaudiu Manoil }
174202d88fb4SClaudiu Manoil 
174302d88fb4SClaudiu Manoil /* eTSEC76: csum generation for frames larger than 2500 may
174402d88fb4SClaudiu Manoil  * cause excess delays before start of transmission
174502d88fb4SClaudiu Manoil  */
174602d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_76(struct gfar_private *priv,
174702d88fb4SClaudiu Manoil 				       unsigned int len)
174802d88fb4SClaudiu Manoil {
174902d88fb4SClaudiu Manoil 	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
175002d88fb4SClaudiu Manoil 	       (len > 2500));
175102d88fb4SClaudiu Manoil }
175202d88fb4SClaudiu Manoil 
17530977f817SJan Ceuleers /* This is called by the kernel when a frame is ready for transmission.
17540977f817SJan Ceuleers  * It is reached through the driver's ndo_start_xmit callback (formerly dev->hard_start_xmit)
17550977f817SJan Ceuleers  */
175606983aa5SYueHaibing static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1757ec21e2ecSJeff Kirsher {
1758ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
1759ec21e2ecSJeff Kirsher 	struct gfar_priv_tx_q *tx_queue = NULL;
1760ec21e2ecSJeff Kirsher 	struct netdev_queue *txq;
1761ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = NULL;
1762ec21e2ecSJeff Kirsher 	struct txfcb *fcb = NULL;
1763ec21e2ecSJeff Kirsher 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1764ec21e2ecSJeff Kirsher 	u32 lstatus;
176542f397adSClaudiu Manoil 	skb_frag_t *frag;
17660d0cffdcSClaudiu Manoil 	int i, rq = 0;
17670d0cffdcSClaudiu Manoil 	int do_tstamp, do_csum, do_vlan;
1768ec21e2ecSJeff Kirsher 	u32 bufaddr;
176950ad076bSClaudiu Manoil 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1770ec21e2ecSJeff Kirsher 
1771ec21e2ecSJeff Kirsher 	rq = skb->queue_mapping;
1772ec21e2ecSJeff Kirsher 	tx_queue = priv->tx_queue[rq];
1773ec21e2ecSJeff Kirsher 	txq = netdev_get_tx_queue(dev, rq);
1774ec21e2ecSJeff Kirsher 	base = tx_queue->tx_bd_base;
1775ec21e2ecSJeff Kirsher 	regs = tx_queue->grp->regs;
1776ec21e2ecSJeff Kirsher 
17770d0cffdcSClaudiu Manoil 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1778df8a39deSJiri Pirko 	do_vlan = skb_vlan_tag_present(skb);
17790d0cffdcSClaudiu Manoil 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
17800d0cffdcSClaudiu Manoil 		    priv->hwts_tx_en;
17810d0cffdcSClaudiu Manoil 
17820d0cffdcSClaudiu Manoil 	if (do_csum || do_vlan)
17830d0cffdcSClaudiu Manoil 		fcb_len = GMAC_FCB_LEN;
17840d0cffdcSClaudiu Manoil 
1785ec21e2ecSJeff Kirsher 	/* check if time stamp should be generated */
17860d0cffdcSClaudiu Manoil 	if (unlikely(do_tstamp))
17870d0cffdcSClaudiu Manoil 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1788ec21e2ecSJeff Kirsher 
1789ec21e2ecSJeff Kirsher 	/* make space for additional header when fcb is needed */
1790d145c903SClaudiu Manoil 	if (fcb_len) {
1791d145c903SClaudiu Manoil 		if (unlikely(skb_cow_head(skb, fcb_len))) {
1792ec21e2ecSJeff Kirsher 			dev->stats.tx_errors++;
1793c9974ad4SEric W. Biederman 			dev_kfree_skb_any(skb);
1794ec21e2ecSJeff Kirsher 			return NETDEV_TX_OK;
1795ec21e2ecSJeff Kirsher 		}
1796ec21e2ecSJeff Kirsher 	}
1797ec21e2ecSJeff Kirsher 
1798ec21e2ecSJeff Kirsher 	/* total number of fragments in the SKB */
1799ec21e2ecSJeff Kirsher 	nr_frags = skb_shinfo(skb)->nr_frags;
1800ec21e2ecSJeff Kirsher 
1801ec21e2ecSJeff Kirsher 	/* calculate the required number of TxBDs for this skb */
1802ec21e2ecSJeff Kirsher 	if (unlikely(do_tstamp))
1803ec21e2ecSJeff Kirsher 		nr_txbds = nr_frags + 2;
1804ec21e2ecSJeff Kirsher 	else
1805ec21e2ecSJeff Kirsher 		nr_txbds = nr_frags + 1;
1806ec21e2ecSJeff Kirsher 
1807ec21e2ecSJeff Kirsher 	/* check if there is space to queue this packet */
1808ec21e2ecSJeff Kirsher 	if (nr_txbds > tx_queue->num_txbdfree) {
1809ec21e2ecSJeff Kirsher 		/* no space, stop the queue */
1810ec21e2ecSJeff Kirsher 		netif_tx_stop_queue(txq);
1811ec21e2ecSJeff Kirsher 		dev->stats.tx_fifo_errors++;
1812ec21e2ecSJeff Kirsher 		return NETDEV_TX_BUSY;
1813ec21e2ecSJeff Kirsher 	}
1814ec21e2ecSJeff Kirsher 
1815ec21e2ecSJeff Kirsher 	/* Update transmit stats */
181650ad076bSClaudiu Manoil 	bytes_sent = skb->len;
181750ad076bSClaudiu Manoil 	tx_queue->stats.tx_bytes += bytes_sent;
181850ad076bSClaudiu Manoil 	/* keep Tx bytes on wire for BQL accounting */
181950ad076bSClaudiu Manoil 	GFAR_CB(skb)->bytes_sent = bytes_sent;
1820ec21e2ecSJeff Kirsher 	tx_queue->stats.tx_packets++;
1821ec21e2ecSJeff Kirsher 
1822ec21e2ecSJeff Kirsher 	txbdp = txbdp_start = tx_queue->cur_tx;
1823a7312d58SClaudiu Manoil 	lstatus = be32_to_cpu(txbdp->lstatus);
1824ec21e2ecSJeff Kirsher 
18259c4886e5SManfred Rudigier 	/* Add TxPAL between FCB and frame if required */
18269c4886e5SManfred Rudigier 	if (unlikely(do_tstamp)) {
18279c4886e5SManfred Rudigier 		skb_push(skb, GMAC_TXPAL_LEN);
18289c4886e5SManfred Rudigier 		memset(skb->data, 0, GMAC_TXPAL_LEN);
18299c4886e5SManfred Rudigier 	}
18309c4886e5SManfred Rudigier 
18310d0cffdcSClaudiu Manoil 	/* Add TxFCB if required */
18320d0cffdcSClaudiu Manoil 	if (fcb_len) {
1833ec21e2ecSJeff Kirsher 		fcb = gfar_add_fcb(skb);
1834ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_TOE);
18350d0cffdcSClaudiu Manoil 	}
18360d0cffdcSClaudiu Manoil 
18370d0cffdcSClaudiu Manoil 	/* Set up checksumming */
18380d0cffdcSClaudiu Manoil 	if (do_csum) {
18390d0cffdcSClaudiu Manoil 		gfar_tx_checksum(skb, fcb, fcb_len);
184002d88fb4SClaudiu Manoil 
184102d88fb4SClaudiu Manoil 		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
184202d88fb4SClaudiu Manoil 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
184302d88fb4SClaudiu Manoil 			__skb_pull(skb, GMAC_FCB_LEN);
184402d88fb4SClaudiu Manoil 			skb_checksum_help(skb);
18450d0cffdcSClaudiu Manoil 			if (do_vlan || do_tstamp) {
18460d0cffdcSClaudiu Manoil 				/* put back a new fcb for vlan/tstamp TOE */
18470d0cffdcSClaudiu Manoil 				fcb = gfar_add_fcb(skb);
18480d0cffdcSClaudiu Manoil 			} else {
18490d0cffdcSClaudiu Manoil 				/* Tx TOE not used */
185002d88fb4SClaudiu Manoil 				lstatus &= ~(BD_LFLAG(TXBD_TOE));
185102d88fb4SClaudiu Manoil 				fcb = NULL;
1852ec21e2ecSJeff Kirsher 			}
1853ec21e2ecSJeff Kirsher 		}
1854ec21e2ecSJeff Kirsher 	}
1855ec21e2ecSJeff Kirsher 
18560d0cffdcSClaudiu Manoil 	if (do_vlan)
1857ec21e2ecSJeff Kirsher 		gfar_tx_vlan(skb, fcb);
1858ec21e2ecSJeff Kirsher 
18590a4b5a24SKevin Hao 	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
18600a4b5a24SKevin Hao 				 DMA_TO_DEVICE);
18610a4b5a24SKevin Hao 	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
18620a4b5a24SKevin Hao 		goto dma_map_err;
18630a4b5a24SKevin Hao 
1864a7312d58SClaudiu Manoil 	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1865ec21e2ecSJeff Kirsher 
1866e19d0839SClaudiu Manoil 	/* Time stamp insertion requires one additional TxBD */
1867e19d0839SClaudiu Manoil 	if (unlikely(do_tstamp))
1868e19d0839SClaudiu Manoil 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1869e19d0839SClaudiu Manoil 						 tx_queue->tx_ring_size);
1870e19d0839SClaudiu Manoil 
187148963b44SClaudiu Manoil 	if (likely(!nr_frags)) {
18729c8b0778SYangbo Lu 		if (likely(!do_tstamp))
1873e19d0839SClaudiu Manoil 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1874e19d0839SClaudiu Manoil 	} else {
1875e19d0839SClaudiu Manoil 		u32 lstatus_start = lstatus;
1876e19d0839SClaudiu Manoil 
1877e19d0839SClaudiu Manoil 		/* Place the fragment addresses and lengths into the TxBDs */
187842f397adSClaudiu Manoil 		frag = &skb_shinfo(skb)->frags[0];
187942f397adSClaudiu Manoil 		for (i = 0; i < nr_frags; i++, frag++) {
188042f397adSClaudiu Manoil 			unsigned int size;
188142f397adSClaudiu Manoil 
1882e19d0839SClaudiu Manoil 			/* Point at the next BD, wrapping as needed */
1883e19d0839SClaudiu Manoil 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1884e19d0839SClaudiu Manoil 
188542f397adSClaudiu Manoil 			size = skb_frag_size(frag);
1886e19d0839SClaudiu Manoil 
188742f397adSClaudiu Manoil 			lstatus = be32_to_cpu(txbdp->lstatus) | size |
1888e19d0839SClaudiu Manoil 				  BD_LFLAG(TXBD_READY);
1889e19d0839SClaudiu Manoil 
1890e19d0839SClaudiu Manoil 			/* Handle the last BD specially */
1891e19d0839SClaudiu Manoil 			if (i == nr_frags - 1)
1892e19d0839SClaudiu Manoil 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1893e19d0839SClaudiu Manoil 
189442f397adSClaudiu Manoil 			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
189542f397adSClaudiu Manoil 						   size, DMA_TO_DEVICE);
1896e19d0839SClaudiu Manoil 			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1897e19d0839SClaudiu Manoil 				goto dma_map_err;
1898e19d0839SClaudiu Manoil 
1899e19d0839SClaudiu Manoil 			/* set the TxBD length and buffer pointer */
1900e19d0839SClaudiu Manoil 			txbdp->bufPtr = cpu_to_be32(bufaddr);
1901e19d0839SClaudiu Manoil 			txbdp->lstatus = cpu_to_be32(lstatus);
1902e19d0839SClaudiu Manoil 		}
1903e19d0839SClaudiu Manoil 
1904e19d0839SClaudiu Manoil 		lstatus = lstatus_start;
1905e19d0839SClaudiu Manoil 	}
1906e19d0839SClaudiu Manoil 
19070977f817SJan Ceuleers 	/* If time stamping is requested, one additional TxBD must be set up. The
1908ec21e2ecSJeff Kirsher 	 * first TxBD points to the FCB and must have a data length of
1909ec21e2ecSJeff Kirsher 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1910ec21e2ecSJeff Kirsher 	 * the full frame length.
1911ec21e2ecSJeff Kirsher 	 */
1912ec21e2ecSJeff Kirsher 	if (unlikely(do_tstamp)) {
1913a7312d58SClaudiu Manoil 		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1914a7312d58SClaudiu Manoil 
1915a7312d58SClaudiu Manoil 		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1916a7312d58SClaudiu Manoil 		bufaddr += fcb_len;
191748963b44SClaudiu Manoil 
1918a7312d58SClaudiu Manoil 		lstatus_ts |= BD_LFLAG(TXBD_READY) |
19190d0cffdcSClaudiu Manoil 			      (skb_headlen(skb) - fcb_len);
192048963b44SClaudiu Manoil 		if (!nr_frags)
192148963b44SClaudiu Manoil 			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1922a7312d58SClaudiu Manoil 
1923a7312d58SClaudiu Manoil 		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1924a7312d58SClaudiu Manoil 		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1925ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1926e19d0839SClaudiu Manoil 
1927e19d0839SClaudiu Manoil 		/* Setup tx hardware time stamping */
1928e19d0839SClaudiu Manoil 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1929e19d0839SClaudiu Manoil 		fcb->ptp = 1;
1930ec21e2ecSJeff Kirsher 	} else {
1931ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1932ec21e2ecSJeff Kirsher 	}
1933ec21e2ecSJeff Kirsher 
193450ad076bSClaudiu Manoil 	netdev_tx_sent_queue(txq, bytes_sent);
1935d8a0f1b0SPaul Gortmaker 
1936d55398baSClaudiu Manoil 	gfar_wmb();
1937ec21e2ecSJeff Kirsher 
1938a7312d58SClaudiu Manoil 	txbdp_start->lstatus = cpu_to_be32(lstatus);
1939ec21e2ecSJeff Kirsher 
1940d55398baSClaudiu Manoil 	gfar_wmb(); /* force lstatus write before tx_skbuff */
1941ec21e2ecSJeff Kirsher 
1942ec21e2ecSJeff Kirsher 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1943ec21e2ecSJeff Kirsher 
1944ec21e2ecSJeff Kirsher 	/* Update the current skb pointer to the next entry we will use
19450977f817SJan Ceuleers 	 * (wrapping if necessary)
19460977f817SJan Ceuleers 	 */
1947ec21e2ecSJeff Kirsher 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1948ec21e2ecSJeff Kirsher 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1949ec21e2ecSJeff Kirsher 
1950ec21e2ecSJeff Kirsher 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1951ec21e2ecSJeff Kirsher 
1952bc602280SClaudiu Manoil 	/* We can work in parallel with gfar_clean_tx_ring(), except
1953bc602280SClaudiu Manoil 	 * when modifying num_txbdfree. Note that we didn't grab the lock
1954bc602280SClaudiu Manoil 	 * when we were reading num_txbdfree and checking for available
1955bc602280SClaudiu Manoil 	 * space; that's because outside of this function it can only grow.
1956bc602280SClaudiu Manoil 	 */
1957bc602280SClaudiu Manoil 	spin_lock_bh(&tx_queue->txlock);
1958ec21e2ecSJeff Kirsher 	/* reduce TxBD free count */
1959ec21e2ecSJeff Kirsher 	tx_queue->num_txbdfree -= (nr_txbds);
1960bc602280SClaudiu Manoil 	spin_unlock_bh(&tx_queue->txlock);
1961ec21e2ecSJeff Kirsher 
1962ec21e2ecSJeff Kirsher 	/* If the next BD still needs to be cleaned up, then the bds
19630977f817SJan Ceuleers 	 * are full.  We need to tell the kernel to stop sending us stuff.
19640977f817SJan Ceuleers 	 */
1965ec21e2ecSJeff Kirsher 	if (!tx_queue->num_txbdfree) {
1966ec21e2ecSJeff Kirsher 		netif_tx_stop_queue(txq);
1967ec21e2ecSJeff Kirsher 
1968ec21e2ecSJeff Kirsher 		dev->stats.tx_fifo_errors++;
1969ec21e2ecSJeff Kirsher 	}
1970ec21e2ecSJeff Kirsher 
1971ec21e2ecSJeff Kirsher 	/* Tell the DMA to go go go */
1972ec21e2ecSJeff Kirsher 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1973ec21e2ecSJeff Kirsher 
1974ec21e2ecSJeff Kirsher 	return NETDEV_TX_OK;
19750a4b5a24SKevin Hao 
19760a4b5a24SKevin Hao dma_map_err:
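	/* Unwind: walk the descriptors set up for this skb and unmap any
	 * fragment buffers that were already marked ready, then drop the skb.
	 */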
19770a4b5a24SKevin Hao 	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
19780a4b5a24SKevin Hao 	if (do_tstamp)
19790a4b5a24SKevin Hao 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
19800a4b5a24SKevin Hao 	for (i = 0; i < nr_frags; i++) {
1981a7312d58SClaudiu Manoil 		lstatus = be32_to_cpu(txbdp->lstatus);
19820a4b5a24SKevin Hao 		if (!(lstatus & BD_LFLAG(TXBD_READY)))
19830a4b5a24SKevin Hao 			break;
19840a4b5a24SKevin Hao 
1985a7312d58SClaudiu Manoil 		lstatus &= ~BD_LFLAG(TXBD_READY);
1986a7312d58SClaudiu Manoil 		txbdp->lstatus = cpu_to_be32(lstatus);
1987a7312d58SClaudiu Manoil 		bufaddr = be32_to_cpu(txbdp->bufPtr);
1988a7312d58SClaudiu Manoil 		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
19890a4b5a24SKevin Hao 			       DMA_TO_DEVICE);
19900a4b5a24SKevin Hao 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
19910a4b5a24SKevin Hao 	}
19920a4b5a24SKevin Hao 	gfar_wmb();
19930a4b5a24SKevin Hao 	dev_kfree_skb_any(skb);
19940a4b5a24SKevin Hao 	return NETDEV_TX_OK;
1995ec21e2ecSJeff Kirsher }
1996ec21e2ecSJeff Kirsher 
1997ec21e2ecSJeff Kirsher /* Changes the mac address if the controller is not running. */
1998ec21e2ecSJeff Kirsher static int gfar_set_mac_address(struct net_device *dev)
1999ec21e2ecSJeff Kirsher {
2000ec21e2ecSJeff Kirsher 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2001ec21e2ecSJeff Kirsher 
2002ec21e2ecSJeff Kirsher 	return 0;
2003ec21e2ecSJeff Kirsher }
2004ec21e2ecSJeff Kirsher 
2005ec21e2ecSJeff Kirsher static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2006ec21e2ecSJeff Kirsher {
2007ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2008ec21e2ecSJeff Kirsher 
20090851133bSClaudiu Manoil 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20100851133bSClaudiu Manoil 		cpu_relax();
20110851133bSClaudiu Manoil 
201288302648SClaudiu Manoil 	if (dev->flags & IFF_UP)
2013ec21e2ecSJeff Kirsher 		stop_gfar(dev);
2014ec21e2ecSJeff Kirsher 
2015ec21e2ecSJeff Kirsher 	dev->mtu = new_mtu;
2016ec21e2ecSJeff Kirsher 
201788302648SClaudiu Manoil 	if (dev->flags & IFF_UP)
2018ec21e2ecSJeff Kirsher 		startup_gfar(dev);
2019ec21e2ecSJeff Kirsher 
20200851133bSClaudiu Manoil 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
20210851133bSClaudiu Manoil 
2022ec21e2ecSJeff Kirsher 	return 0;
2023ec21e2ecSJeff Kirsher }
2024ec21e2ecSJeff Kirsher 
20259f5c44cfSYueHaibing static void reset_gfar(struct net_device *ndev)
20260851133bSClaudiu Manoil {
20270851133bSClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
20280851133bSClaudiu Manoil 
20290851133bSClaudiu Manoil 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20300851133bSClaudiu Manoil 		cpu_relax();
20310851133bSClaudiu Manoil 
20320851133bSClaudiu Manoil 	stop_gfar(ndev);
20330851133bSClaudiu Manoil 	startup_gfar(ndev);
20340851133bSClaudiu Manoil 
20350851133bSClaudiu Manoil 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
20360851133bSClaudiu Manoil }
20370851133bSClaudiu Manoil 
2038ec21e2ecSJeff Kirsher /* gfar_reset_task gets scheduled when a packet has not been
2039ec21e2ecSJeff Kirsher  * transmitted after a set amount of time.
2040ec21e2ecSJeff Kirsher  * For now, assume that clearing out all the structures and
2041ec21e2ecSJeff Kirsher  * starting over will fix the problem.
2042ec21e2ecSJeff Kirsher  */
2043ec21e2ecSJeff Kirsher static void gfar_reset_task(struct work_struct *work)
2044ec21e2ecSJeff Kirsher {
2045ec21e2ecSJeff Kirsher 	struct gfar_private *priv = container_of(work, struct gfar_private,
2046ec21e2ecSJeff Kirsher 						 reset_task);
20470851133bSClaudiu Manoil 	reset_gfar(priv->ndev);
2048ec21e2ecSJeff Kirsher }
2049ec21e2ecSJeff Kirsher 
20500290bd29SMichael S. Tsirkin static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2051ec21e2ecSJeff Kirsher {
2052ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2053ec21e2ecSJeff Kirsher 
2054ec21e2ecSJeff Kirsher 	dev->stats.tx_errors++;
2055ec21e2ecSJeff Kirsher 	schedule_work(&priv->reset_task);
2056ec21e2ecSJeff Kirsher }
2057ec21e2ecSJeff Kirsher 
20587d993c5fSArseny Solokha static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
20597d993c5fSArseny Solokha {
20607d993c5fSArseny Solokha 	struct hwtstamp_config config;
20617d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(netdev);
20627d993c5fSArseny Solokha 
20637d993c5fSArseny Solokha 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
20647d993c5fSArseny Solokha 		return -EFAULT;
20657d993c5fSArseny Solokha 
20667d993c5fSArseny Solokha 	/* reserved for future extensions */
20677d993c5fSArseny Solokha 	if (config.flags)
20687d993c5fSArseny Solokha 		return -EINVAL;
20697d993c5fSArseny Solokha 
20707d993c5fSArseny Solokha 	switch (config.tx_type) {
20717d993c5fSArseny Solokha 	case HWTSTAMP_TX_OFF:
20727d993c5fSArseny Solokha 		priv->hwts_tx_en = 0;
20737d993c5fSArseny Solokha 		break;
20747d993c5fSArseny Solokha 	case HWTSTAMP_TX_ON:
20757d993c5fSArseny Solokha 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
20767d993c5fSArseny Solokha 			return -ERANGE;
20777d993c5fSArseny Solokha 		priv->hwts_tx_en = 1;
20787d993c5fSArseny Solokha 		break;
20797d993c5fSArseny Solokha 	default:
20807d993c5fSArseny Solokha 		return -ERANGE;
20817d993c5fSArseny Solokha 	}
20827d993c5fSArseny Solokha 
20837d993c5fSArseny Solokha 	switch (config.rx_filter) {
20847d993c5fSArseny Solokha 	case HWTSTAMP_FILTER_NONE:
20857d993c5fSArseny Solokha 		if (priv->hwts_rx_en) {
20867d993c5fSArseny Solokha 			priv->hwts_rx_en = 0;
20877d993c5fSArseny Solokha 			reset_gfar(netdev);
20887d993c5fSArseny Solokha 		}
20897d993c5fSArseny Solokha 		break;
20907d993c5fSArseny Solokha 	default:
20917d993c5fSArseny Solokha 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
20927d993c5fSArseny Solokha 			return -ERANGE;
20937d993c5fSArseny Solokha 		if (!priv->hwts_rx_en) {
20947d993c5fSArseny Solokha 			priv->hwts_rx_en = 1;
20957d993c5fSArseny Solokha 			reset_gfar(netdev);
20967d993c5fSArseny Solokha 		}
20977d993c5fSArseny Solokha 		config.rx_filter = HWTSTAMP_FILTER_ALL;
20987d993c5fSArseny Solokha 		break;
20997d993c5fSArseny Solokha 	}
21007d993c5fSArseny Solokha 
21017d993c5fSArseny Solokha 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
21027d993c5fSArseny Solokha 		-EFAULT : 0;
21037d993c5fSArseny Solokha }
21047d993c5fSArseny Solokha 
21057d993c5fSArseny Solokha static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
21067d993c5fSArseny Solokha {
21077d993c5fSArseny Solokha 	struct hwtstamp_config config;
21087d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(netdev);
21097d993c5fSArseny Solokha 
21107d993c5fSArseny Solokha 	config.flags = 0;
21117d993c5fSArseny Solokha 	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
21127d993c5fSArseny Solokha 	config.rx_filter = (priv->hwts_rx_en ?
21137d993c5fSArseny Solokha 			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
21147d993c5fSArseny Solokha 
21157d993c5fSArseny Solokha 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
21167d993c5fSArseny Solokha 		-EFAULT : 0;
21177d993c5fSArseny Solokha }
21187d993c5fSArseny Solokha 
21197d993c5fSArseny Solokha static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
21207d993c5fSArseny Solokha {
21217d993c5fSArseny Solokha 	struct phy_device *phydev = dev->phydev;
21227d993c5fSArseny Solokha 
21237d993c5fSArseny Solokha 	if (!netif_running(dev))
21247d993c5fSArseny Solokha 		return -EINVAL;
21257d993c5fSArseny Solokha 
21267d993c5fSArseny Solokha 	if (cmd == SIOCSHWTSTAMP)
21277d993c5fSArseny Solokha 		return gfar_hwtstamp_set(dev, rq);
21287d993c5fSArseny Solokha 	if (cmd == SIOCGHWTSTAMP)
21297d993c5fSArseny Solokha 		return gfar_hwtstamp_get(dev, rq);
21307d993c5fSArseny Solokha 
21317d993c5fSArseny Solokha 	if (!phydev)
21327d993c5fSArseny Solokha 		return -ENODEV;
21337d993c5fSArseny Solokha 
21347d993c5fSArseny Solokha 	return phy_mii_ioctl(phydev, rq, cmd);
21357d993c5fSArseny Solokha }
21367d993c5fSArseny Solokha 
2137ec21e2ecSJeff Kirsher /* Interrupt Handler for Transmit complete */
2138c233cf40SClaudiu Manoil static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2139ec21e2ecSJeff Kirsher {
2140ec21e2ecSJeff Kirsher 	struct net_device *dev = tx_queue->dev;
2141d8a0f1b0SPaul Gortmaker 	struct netdev_queue *txq;
2142ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2143ec21e2ecSJeff Kirsher 	struct txbd8 *bdp, *next = NULL;
2144ec21e2ecSJeff Kirsher 	struct txbd8 *lbdp = NULL;
2145ec21e2ecSJeff Kirsher 	struct txbd8 *base = tx_queue->tx_bd_base;
2146ec21e2ecSJeff Kirsher 	struct sk_buff *skb;
2147ec21e2ecSJeff Kirsher 	int skb_dirtytx;
2148ec21e2ecSJeff Kirsher 	int tx_ring_size = tx_queue->tx_ring_size;
2149ec21e2ecSJeff Kirsher 	int frags = 0, nr_txbds = 0;
2150ec21e2ecSJeff Kirsher 	int i;
2151ec21e2ecSJeff Kirsher 	int howmany = 0;
2152d8a0f1b0SPaul Gortmaker 	int tqi = tx_queue->qindex;
2153d8a0f1b0SPaul Gortmaker 	unsigned int bytes_sent = 0;
2154ec21e2ecSJeff Kirsher 	u32 lstatus;
2155ec21e2ecSJeff Kirsher 	size_t buflen;
2156ec21e2ecSJeff Kirsher 
2157d8a0f1b0SPaul Gortmaker 	txq = netdev_get_tx_queue(dev, tqi);
2158ec21e2ecSJeff Kirsher 	bdp = tx_queue->dirty_tx;
2159ec21e2ecSJeff Kirsher 	skb_dirtytx = tx_queue->skb_dirtytx;
2160ec21e2ecSJeff Kirsher 
2161ec21e2ecSJeff Kirsher 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2162c26a2c2dSVladimir Oltean 		bool do_tstamp;
2163c26a2c2dSVladimir Oltean 
2164c26a2c2dSVladimir Oltean 		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2165c26a2c2dSVladimir Oltean 			    priv->hwts_tx_en;
2166ec21e2ecSJeff Kirsher 
2167ec21e2ecSJeff Kirsher 		frags = skb_shinfo(skb)->nr_frags;
2168ec21e2ecSJeff Kirsher 
21690977f817SJan Ceuleers 		/* When time stamping, one additional TxBD must be freed.
2170ec21e2ecSJeff Kirsher 		 * Also, we need to dma_unmap_single() the TxPAL.
2171ec21e2ecSJeff Kirsher 		 */
2172c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp))
2173ec21e2ecSJeff Kirsher 			nr_txbds = frags + 2;
2174ec21e2ecSJeff Kirsher 		else
2175ec21e2ecSJeff Kirsher 			nr_txbds = frags + 1;
2176ec21e2ecSJeff Kirsher 
2177ec21e2ecSJeff Kirsher 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2178ec21e2ecSJeff Kirsher 
2179a7312d58SClaudiu Manoil 		lstatus = be32_to_cpu(lbdp->lstatus);
2180ec21e2ecSJeff Kirsher 
2181ec21e2ecSJeff Kirsher 		/* Only clean completed frames */
2182ec21e2ecSJeff Kirsher 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2183ec21e2ecSJeff Kirsher 		    (lstatus & BD_LENGTH_MASK))
2184ec21e2ecSJeff Kirsher 			break;
2185ec21e2ecSJeff Kirsher 
2186c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp)) {
2187ec21e2ecSJeff Kirsher 			next = next_txbd(bdp, base, tx_ring_size);
2188a7312d58SClaudiu Manoil 			buflen = be16_to_cpu(next->length) +
2189a7312d58SClaudiu Manoil 				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2190ec21e2ecSJeff Kirsher 		} else
2191a7312d58SClaudiu Manoil 			buflen = be16_to_cpu(bdp->length);
2192ec21e2ecSJeff Kirsher 
2193a7312d58SClaudiu Manoil 		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2194ec21e2ecSJeff Kirsher 				 buflen, DMA_TO_DEVICE);
2195ec21e2ecSJeff Kirsher 
2196c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp)) {
2197ec21e2ecSJeff Kirsher 			struct skb_shared_hwtstamps shhwtstamps;
2198b4b67f26SScott Wood 			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2199b4b67f26SScott Wood 					  ~0x7UL);
2200bc4598bcSJan Ceuleers 
2201ec21e2ecSJeff Kirsher 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2202f54af12fSYangbo Lu 			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
22039c4886e5SManfred Rudigier 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2204ec21e2ecSJeff Kirsher 			skb_tstamp_tx(skb, &shhwtstamps);
2205a7312d58SClaudiu Manoil 			gfar_clear_txbd_status(bdp);
2206ec21e2ecSJeff Kirsher 			bdp = next;
2207ec21e2ecSJeff Kirsher 		}
2208ec21e2ecSJeff Kirsher 
2209a7312d58SClaudiu Manoil 		gfar_clear_txbd_status(bdp);
2210ec21e2ecSJeff Kirsher 		bdp = next_txbd(bdp, base, tx_ring_size);
2211ec21e2ecSJeff Kirsher 
2212ec21e2ecSJeff Kirsher 		for (i = 0; i < frags; i++) {
2213a7312d58SClaudiu Manoil 			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2214a7312d58SClaudiu Manoil 				       be16_to_cpu(bdp->length),
2215a7312d58SClaudiu Manoil 				       DMA_TO_DEVICE);
2216a7312d58SClaudiu Manoil 			gfar_clear_txbd_status(bdp);
2217ec21e2ecSJeff Kirsher 			bdp = next_txbd(bdp, base, tx_ring_size);
2218ec21e2ecSJeff Kirsher 		}
2219ec21e2ecSJeff Kirsher 
222050ad076bSClaudiu Manoil 		bytes_sent += GFAR_CB(skb)->bytes_sent;
2221d8a0f1b0SPaul Gortmaker 
2222ec21e2ecSJeff Kirsher 		dev_kfree_skb_any(skb);
2223ec21e2ecSJeff Kirsher 
2224ec21e2ecSJeff Kirsher 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2225ec21e2ecSJeff Kirsher 
2226ec21e2ecSJeff Kirsher 		skb_dirtytx = (skb_dirtytx + 1) &
2227ec21e2ecSJeff Kirsher 			      TX_RING_MOD_MASK(tx_ring_size);
2228ec21e2ecSJeff Kirsher 
2229ec21e2ecSJeff Kirsher 		howmany++;
2230bc602280SClaudiu Manoil 		spin_lock(&tx_queue->txlock);
2231ec21e2ecSJeff Kirsher 		tx_queue->num_txbdfree += nr_txbds;
2232bc602280SClaudiu Manoil 		spin_unlock(&tx_queue->txlock);
2233ec21e2ecSJeff Kirsher 	}
2234ec21e2ecSJeff Kirsher 
2235ec21e2ecSJeff Kirsher 	/* If we freed a buffer, we can restart transmission, if necessary */
22360851133bSClaudiu Manoil 	if (tx_queue->num_txbdfree &&
22370851133bSClaudiu Manoil 	    netif_tx_queue_stopped(txq) &&
22380851133bSClaudiu Manoil 	    !(test_bit(GFAR_DOWN, &priv->state)))
22390851133bSClaudiu Manoil 		netif_wake_subqueue(priv->ndev, tqi);
2240ec21e2ecSJeff Kirsher 
2241ec21e2ecSJeff Kirsher 	/* Update dirty indicators */
2242ec21e2ecSJeff Kirsher 	tx_queue->skb_dirtytx = skb_dirtytx;
2243ec21e2ecSJeff Kirsher 	tx_queue->dirty_tx = bdp;
2244ec21e2ecSJeff Kirsher 
2245d8a0f1b0SPaul Gortmaker 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2246ec21e2ecSJeff Kirsher }
2247ec21e2ecSJeff Kirsher 
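/* Translate the Rx descriptor error flags in lstatus into netdev error
 * counters and the driver's extra_stats.
 */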
2248f23223f1SClaudiu Manoil static void count_errors(u32 lstatus, struct net_device *ndev)
2249ec21e2ecSJeff Kirsher {
2250f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
2251f23223f1SClaudiu Manoil 	struct net_device_stats *stats = &ndev->stats;
2252ec21e2ecSJeff Kirsher 	struct gfar_extra_stats *estats = &priv->extra_stats;
2253ec21e2ecSJeff Kirsher 
22540977f817SJan Ceuleers 	/* If the packet was truncated, none of the other errors matter */
2255f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2256ec21e2ecSJeff Kirsher 		stats->rx_length_errors++;
2257ec21e2ecSJeff Kirsher 
2258212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_trunc);
2259ec21e2ecSJeff Kirsher 
2260ec21e2ecSJeff Kirsher 		return;
2261ec21e2ecSJeff Kirsher 	}
2262ec21e2ecSJeff Kirsher 	/* Count the errors, if there were any */
2263f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2264ec21e2ecSJeff Kirsher 		stats->rx_length_errors++;
2265ec21e2ecSJeff Kirsher 
2266f966082eSClaudiu Manoil 		if (lstatus & BD_LFLAG(RXBD_LARGE))
2267212079dfSPaul Gortmaker 			atomic64_inc(&estats->rx_large);
2268ec21e2ecSJeff Kirsher 		else
2269212079dfSPaul Gortmaker 			atomic64_inc(&estats->rx_short);
2270ec21e2ecSJeff Kirsher 	}
2271f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2272ec21e2ecSJeff Kirsher 		stats->rx_frame_errors++;
2273212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_nonoctet);
2274ec21e2ecSJeff Kirsher 	}
2275f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2276212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_crcerr);
2277ec21e2ecSJeff Kirsher 		stats->rx_crc_errors++;
2278ec21e2ecSJeff Kirsher 	}
2279f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2280212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_overrun);
2281f966082eSClaudiu Manoil 		stats->rx_over_errors++;
2282ec21e2ecSJeff Kirsher 	}
2283ec21e2ecSJeff Kirsher }
2284ec21e2ecSJeff Kirsher 
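/* Rx interrupt handler: mask further Rx interrupts in IMASK and hand
 * the work over to NAPI (napi_rx).  If NAPI is already scheduled, only
 * ack IEVENT so the line is not re-raised for frames already queued.
 * FGPI events are acked and ignored here.
 */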
22857ad38784SArseny Solokha static irqreturn_t gfar_receive(int irq, void *grp_id)
2286ec21e2ecSJeff Kirsher {
2287aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2288aeb12c5eSClaudiu Manoil 	unsigned long flags;
22893e905b80SClaudiu Manoil 	u32 imask, ievent;
22903e905b80SClaudiu Manoil 
22913e905b80SClaudiu Manoil 	ievent = gfar_read(&grp->regs->ievent);
22923e905b80SClaudiu Manoil 
22933e905b80SClaudiu Manoil 	if (unlikely(ievent & IEVENT_FGPI)) {
22943e905b80SClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
22953e905b80SClaudiu Manoil 		return IRQ_HANDLED;
22963e905b80SClaudiu Manoil 	}
2297aeb12c5eSClaudiu Manoil 
2298aeb12c5eSClaudiu Manoil 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2299aeb12c5eSClaudiu Manoil 		spin_lock_irqsave(&grp->grplock, flags);
2300aeb12c5eSClaudiu Manoil 		imask = gfar_read(&grp->regs->imask);
2301aeb12c5eSClaudiu Manoil 		imask &= IMASK_RX_DISABLED;
2302aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->imask, imask);
2303aeb12c5eSClaudiu Manoil 		spin_unlock_irqrestore(&grp->grplock, flags);
2304aeb12c5eSClaudiu Manoil 		__napi_schedule(&grp->napi_rx);
2305aeb12c5eSClaudiu Manoil 	} else {
2306aeb12c5eSClaudiu Manoil 		/* Clear IEVENT, so interrupts aren't called again
2307aeb12c5eSClaudiu Manoil 		 * because of the packets that have already arrived.
2308aeb12c5eSClaudiu Manoil 		 */
2309aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2310aeb12c5eSClaudiu Manoil 	}
2311aeb12c5eSClaudiu Manoil 
2312aeb12c5eSClaudiu Manoil 	return IRQ_HANDLED;
2313aeb12c5eSClaudiu Manoil }
2314aeb12c5eSClaudiu Manoil 
2315aeb12c5eSClaudiu Manoil /* Interrupt Handler for Transmit complete */
2316aeb12c5eSClaudiu Manoil static irqreturn_t gfar_transmit(int irq, void *grp_id)
2317aeb12c5eSClaudiu Manoil {
2318aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2319aeb12c5eSClaudiu Manoil 	unsigned long flags;
2320aeb12c5eSClaudiu Manoil 	u32 imask;
2321aeb12c5eSClaudiu Manoil 
2322aeb12c5eSClaudiu Manoil 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2323aeb12c5eSClaudiu Manoil 		spin_lock_irqsave(&grp->grplock, flags);
2324aeb12c5eSClaudiu Manoil 		imask = gfar_read(&grp->regs->imask);
2325aeb12c5eSClaudiu Manoil 		imask &= IMASK_TX_DISABLED;
2326aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->imask, imask);
2327aeb12c5eSClaudiu Manoil 		spin_unlock_irqrestore(&grp->grplock, flags);
2328aeb12c5eSClaudiu Manoil 		__napi_schedule(&grp->napi_tx);
2329aeb12c5eSClaudiu Manoil 	} else {
2330aeb12c5eSClaudiu Manoil 		/* Clear IEVENT, so interrupts aren't called again
2331aeb12c5eSClaudiu Manoil 		 * because of the packets that have already arrived.
2332aeb12c5eSClaudiu Manoil 		 */
2333aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2334aeb12c5eSClaudiu Manoil 	}
2335aeb12c5eSClaudiu Manoil 
2336ec21e2ecSJeff Kirsher 	return IRQ_HANDLED;
2337ec21e2ecSJeff Kirsher }
2338ec21e2ecSJeff Kirsher 
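/* Rx buffers are page halves: each gfar_rx_buff maps a full page and
 * page_offset selects which half (GFAR_RXB_TRUESIZE) the controller
 * fills.  Once a half is consumed, the offset is flipped and the page
 * refcount bumped so the other half can be recycled into the ring
 * without a new allocation, unless the page is still in use elsewhere
 * or was pfmemalloc'ed.
 */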
233975354148SClaudiu Manoil static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
234075354148SClaudiu Manoil 			     struct sk_buff *skb, bool first)
234175354148SClaudiu Manoil {
2342202a0a70SAndy Spencer 	int size = lstatus & BD_LENGTH_MASK;
234375354148SClaudiu Manoil 	struct page *page = rxb->page;
234475354148SClaudiu Manoil 
23456c389fc9SZefir Kurtisi 	if (likely(first)) {
234675354148SClaudiu Manoil 		skb_put(skb, size);
23476c389fc9SZefir Kurtisi 	} else {
23486c389fc9SZefir Kurtisi 		/* the last fragment's length contains the full frame length */
2349d903ec77SAndy Spencer 		if (lstatus & BD_LFLAG(RXBD_LAST))
23506c389fc9SZefir Kurtisi 			size -= skb->len;
23516c389fc9SZefir Kurtisi 
2352d8861babSMichael Braun 		WARN(size < 0, "gianfar: rx fragment size underflow");
2353d8861babSMichael Braun 		if (size < 0)
2354d8861babSMichael Braun 			return false;
2355d8861babSMichael Braun 
235675354148SClaudiu Manoil 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
235775354148SClaudiu Manoil 				rxb->page_offset + RXBUF_ALIGNMENT,
235875354148SClaudiu Manoil 				size, GFAR_RXB_TRUESIZE);
23596c389fc9SZefir Kurtisi 	}
236075354148SClaudiu Manoil 
236175354148SClaudiu Manoil 	/* try reuse page */
236269fed99bSEric Dumazet 	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
236375354148SClaudiu Manoil 		return false;
236475354148SClaudiu Manoil 
236575354148SClaudiu Manoil 	/* change offset to the other half */
236675354148SClaudiu Manoil 	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
236775354148SClaudiu Manoil 
2368fe896d18SJoonsoo Kim 	page_ref_inc(page);
236975354148SClaudiu Manoil 
237075354148SClaudiu Manoil 	return true;
237175354148SClaudiu Manoil }
237275354148SClaudiu Manoil 
237375354148SClaudiu Manoil static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
237475354148SClaudiu Manoil 			       struct gfar_rx_buff *old_rxb)
237575354148SClaudiu Manoil {
237675354148SClaudiu Manoil 	struct gfar_rx_buff *new_rxb;
237775354148SClaudiu Manoil 	u16 nta = rxq->next_to_alloc;
237875354148SClaudiu Manoil 
237975354148SClaudiu Manoil 	new_rxb = &rxq->rx_buff[nta];
238075354148SClaudiu Manoil 
238175354148SClaudiu Manoil 	/* find next buf that can reuse a page */
238275354148SClaudiu Manoil 	nta++;
238375354148SClaudiu Manoil 	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
238475354148SClaudiu Manoil 
238575354148SClaudiu Manoil 	/* copy page reference */
238675354148SClaudiu Manoil 	*new_rxb = *old_rxb;
238775354148SClaudiu Manoil 
238875354148SClaudiu Manoil 	/* sync for use by the device */
238975354148SClaudiu Manoil 	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
239075354148SClaudiu Manoil 					 old_rxb->page_offset,
239175354148SClaudiu Manoil 					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
239275354148SClaudiu Manoil }
239375354148SClaudiu Manoil 
239475354148SClaudiu Manoil static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
239575354148SClaudiu Manoil 					    u32 lstatus, struct sk_buff *skb)
239675354148SClaudiu Manoil {
239775354148SClaudiu Manoil 	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
239875354148SClaudiu Manoil 	struct page *page = rxb->page;
239975354148SClaudiu Manoil 	bool first = false;
240075354148SClaudiu Manoil 
240175354148SClaudiu Manoil 	if (likely(!skb)) {
240275354148SClaudiu Manoil 		void *buff_addr = page_address(page) + rxb->page_offset;
240375354148SClaudiu Manoil 
240475354148SClaudiu Manoil 		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
240575354148SClaudiu Manoil 		if (unlikely(!skb)) {
240675354148SClaudiu Manoil 			gfar_rx_alloc_err(rx_queue);
240775354148SClaudiu Manoil 			return NULL;
240875354148SClaudiu Manoil 		}
240975354148SClaudiu Manoil 		skb_reserve(skb, RXBUF_ALIGNMENT);
241075354148SClaudiu Manoil 		first = true;
241175354148SClaudiu Manoil 	}
241275354148SClaudiu Manoil 
241375354148SClaudiu Manoil 	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
241475354148SClaudiu Manoil 				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
241575354148SClaudiu Manoil 
241675354148SClaudiu Manoil 	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
241775354148SClaudiu Manoil 		/* reuse the free half of the page */
241875354148SClaudiu Manoil 		gfar_reuse_rx_page(rx_queue, rxb);
241975354148SClaudiu Manoil 	} else {
242075354148SClaudiu Manoil 		/* page cannot be reused, unmap it */
242175354148SClaudiu Manoil 		dma_unmap_page(rx_queue->dev, rxb->dma,
242275354148SClaudiu Manoil 			       PAGE_SIZE, DMA_FROM_DEVICE);
242375354148SClaudiu Manoil 	}
242475354148SClaudiu Manoil 
242575354148SClaudiu Manoil 	/* clear rxb content */
242675354148SClaudiu Manoil 	rxb->page = NULL;
242775354148SClaudiu Manoil 
242875354148SClaudiu Manoil 	return skb;
242975354148SClaudiu Manoil }
243075354148SClaudiu Manoil 
2431ec21e2ecSJeff Kirsher static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2432ec21e2ecSJeff Kirsher {
2433ec21e2ecSJeff Kirsher 	/* If valid headers were found, and valid sums
2434ec21e2ecSJeff Kirsher 	 * were verified, then we tell the kernel that no
24350977f817SJan Ceuleers 	 * checksumming is necessary.  Otherwise, the stack is left to verify it.
24360977f817SJan Ceuleers 	 */
243726eb9374SClaudiu Manoil 	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
243826eb9374SClaudiu Manoil 	    (RXFCB_CIP | RXFCB_CTU))
2439ec21e2ecSJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2440ec21e2ecSJeff Kirsher 	else
2441ec21e2ecSJeff Kirsher 		skb_checksum_none_assert(skb);
2442ec21e2ecSJeff Kirsher }
2443ec21e2ecSJeff Kirsher 
24440977f817SJan Ceuleers /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2445f23223f1SClaudiu Manoil static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2446ec21e2ecSJeff Kirsher {
2447f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
2448ec21e2ecSJeff Kirsher 	struct rxfcb *fcb = NULL;
2449ec21e2ecSJeff Kirsher 
2450ec21e2ecSJeff Kirsher 	/* fcb is at the beginning if exists */
2451ec21e2ecSJeff Kirsher 	fcb = (struct rxfcb *)skb->data;
2452ec21e2ecSJeff Kirsher 
24530977f817SJan Ceuleers 	/* Remove the FCB from the skb
24540977f817SJan Ceuleers 	 * Remove the padded bytes, if there are any
24550977f817SJan Ceuleers 	 */
2456f23223f1SClaudiu Manoil 	if (priv->uses_rxfcb)
245776f31e8bSClaudiu Manoil 		skb_pull(skb, GMAC_FCB_LEN);
2458ec21e2ecSJeff Kirsher 
2459ec21e2ecSJeff Kirsher 	/* Get receive timestamp from the skb */
2460ec21e2ecSJeff Kirsher 	if (priv->hwts_rx_en) {
2461ec21e2ecSJeff Kirsher 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2462ec21e2ecSJeff Kirsher 		u64 *ns = (u64 *) skb->data;
2463bc4598bcSJan Ceuleers 
2464ec21e2ecSJeff Kirsher 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2465f54af12fSYangbo Lu 		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2466ec21e2ecSJeff Kirsher 	}
2467ec21e2ecSJeff Kirsher 
2468ec21e2ecSJeff Kirsher 	if (priv->padding)
2469ec21e2ecSJeff Kirsher 		skb_pull(skb, priv->padding);
2470ec21e2ecSJeff Kirsher 
2471d903ec77SAndy Spencer 	/* Trim off the FCS */
2472d903ec77SAndy Spencer 	pskb_trim(skb, skb->len - ETH_FCS_LEN);
2473d903ec77SAndy Spencer 
2474f23223f1SClaudiu Manoil 	if (ndev->features & NETIF_F_RXCSUM)
2475ec21e2ecSJeff Kirsher 		gfar_rx_checksum(skb, fcb);
2476ec21e2ecSJeff Kirsher 
2477f646968fSPatrick McHardy 	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2478823dcd25SDavid S. Miller 	 * Even if vlan rx accel is disabled, on some chips
2479823dcd25SDavid S. Miller 	 * RXFCB_VLN is pseudo randomly set.
2480823dcd25SDavid S. Miller 	 */
2481f23223f1SClaudiu Manoil 	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
248226eb9374SClaudiu Manoil 	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
248326eb9374SClaudiu Manoil 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
248426eb9374SClaudiu Manoil 				       be16_to_cpu(fcb->vlctl));
2485ec21e2ecSJeff Kirsher }
2486ec21e2ecSJeff Kirsher 
2487ec21e2ecSJeff Kirsher /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2488ec21e2ecSJeff Kirsher  * until the budget/quota has been reached. Returns the number
2489ec21e2ecSJeff Kirsher  * of frames handled
2490ec21e2ecSJeff Kirsher  */
24917ad38784SArseny Solokha static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
24927ad38784SArseny Solokha 			      int rx_work_limit)
2493ec21e2ecSJeff Kirsher {
2494f23223f1SClaudiu Manoil 	struct net_device *ndev = rx_queue->ndev;
2495f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
249675354148SClaudiu Manoil 	struct rxbd8 *bdp;
249775354148SClaudiu Manoil 	int i, howmany = 0;
249875354148SClaudiu Manoil 	struct sk_buff *skb = rx_queue->skb;
249975354148SClaudiu Manoil 	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
250075354148SClaudiu Manoil 	unsigned int total_bytes = 0, total_pkts = 0;
2501ec21e2ecSJeff Kirsher 
2502ec21e2ecSJeff Kirsher 	/* Get the first full descriptor */
250376f31e8bSClaudiu Manoil 	i = rx_queue->next_to_clean;
2504ec21e2ecSJeff Kirsher 
250576f31e8bSClaudiu Manoil 	while (rx_work_limit--) {
2506f966082eSClaudiu Manoil 		u32 lstatus;
2507ec21e2ecSJeff Kirsher 
250876f31e8bSClaudiu Manoil 		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
250976f31e8bSClaudiu Manoil 			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
251076f31e8bSClaudiu Manoil 			cleaned_cnt = 0;
251176f31e8bSClaudiu Manoil 		}
2512bc4598bcSJan Ceuleers 
251376f31e8bSClaudiu Manoil 		bdp = &rx_queue->rx_bd_base[i];
2514f966082eSClaudiu Manoil 		lstatus = be32_to_cpu(bdp->lstatus);
2515f966082eSClaudiu Manoil 		if (lstatus & BD_LFLAG(RXBD_EMPTY))
251676f31e8bSClaudiu Manoil 			break;
251776f31e8bSClaudiu Manoil 
2518d8861babSMichael Braun 		/* lost RXBD_LAST descriptor due to overrun */
2519d8861babSMichael Braun 		if (skb &&
2520d8861babSMichael Braun 		    (lstatus & BD_LFLAG(RXBD_FIRST))) {
2521d8861babSMichael Braun 			/* discard faulty buffer */
2522d8861babSMichael Braun 			dev_kfree_skb(skb);
2523d8861babSMichael Braun 			skb = NULL;
2524d8861babSMichael Braun 			rx_queue->stats.rx_dropped++;
2525d8861babSMichael Braun 
2526d8861babSMichael Braun 			/* can continue normally */
2527d8861babSMichael Braun 		}
2528d8861babSMichael Braun 
252976f31e8bSClaudiu Manoil 		/* order rx buffer descriptor reads */
2530ec21e2ecSJeff Kirsher 		rmb();
2531ec21e2ecSJeff Kirsher 
253276f31e8bSClaudiu Manoil 		/* fetch next to clean buffer from the ring */
253375354148SClaudiu Manoil 		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
253475354148SClaudiu Manoil 		if (unlikely(!skb))
253575354148SClaudiu Manoil 			break;
2536ec21e2ecSJeff Kirsher 
253775354148SClaudiu Manoil 		cleaned_cnt++;
253875354148SClaudiu Manoil 		howmany++;
2539ec21e2ecSJeff Kirsher 
254075354148SClaudiu Manoil 		if (unlikely(++i == rx_queue->rx_ring_size))
254175354148SClaudiu Manoil 			i = 0;
2542ec21e2ecSJeff Kirsher 
254375354148SClaudiu Manoil 		rx_queue->next_to_clean = i;
254475354148SClaudiu Manoil 
254575354148SClaudiu Manoil 		/* fetch next buffer if not the last in frame */
254675354148SClaudiu Manoil 		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
254775354148SClaudiu Manoil 			continue;
254875354148SClaudiu Manoil 
254975354148SClaudiu Manoil 		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2550f23223f1SClaudiu Manoil 			count_errors(lstatus, ndev);
2551ec21e2ecSJeff Kirsher 
255276f31e8bSClaudiu Manoil 			/* discard faulty buffer */
2553acb600deSEric Dumazet 			dev_kfree_skb(skb);
255475354148SClaudiu Manoil 			skb = NULL;
255575354148SClaudiu Manoil 			rx_queue->stats.rx_dropped++;
255675354148SClaudiu Manoil 			continue;
255775354148SClaudiu Manoil 		}
255876f31e8bSClaudiu Manoil 
2559590399ddSClaudiu Manoil 		gfar_process_frame(ndev, skb);
2560590399ddSClaudiu Manoil 
2561ec21e2ecSJeff Kirsher 		/* Increment the number of packets */
256275354148SClaudiu Manoil 		total_pkts++;
256375354148SClaudiu Manoil 		total_bytes += skb->len;
2564ec21e2ecSJeff Kirsher 
2565ec21e2ecSJeff Kirsher 		skb_record_rx_queue(skb, rx_queue->qindex);
256675354148SClaudiu Manoil 
2567590399ddSClaudiu Manoil 		skb->protocol = eth_type_trans(skb, ndev);
2568f23223f1SClaudiu Manoil 
2569f23223f1SClaudiu Manoil 		/* Send the packet up the stack */
2570f23223f1SClaudiu Manoil 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2571ec21e2ecSJeff Kirsher 
257275354148SClaudiu Manoil 		skb = NULL;
2573ec21e2ecSJeff Kirsher 	}
2574ec21e2ecSJeff Kirsher 
257575354148SClaudiu Manoil 	/* Store incomplete frames for completion */
257675354148SClaudiu Manoil 	rx_queue->skb = skb;
2577ec21e2ecSJeff Kirsher 
257875354148SClaudiu Manoil 	rx_queue->stats.rx_packets += total_pkts;
257975354148SClaudiu Manoil 	rx_queue->stats.rx_bytes += total_bytes;
258076f31e8bSClaudiu Manoil 
258176f31e8bSClaudiu Manoil 	if (cleaned_cnt)
258276f31e8bSClaudiu Manoil 		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
258376f31e8bSClaudiu Manoil 
258476f31e8bSClaudiu Manoil 	/* Update Last Free RxBD pointer for LFC */
258576f31e8bSClaudiu Manoil 	if (unlikely(priv->tx_actual_en)) {
2586b4b67f26SScott Wood 		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2587b4b67f26SScott Wood 
2588b4b67f26SScott Wood 		gfar_write(rx_queue->rfbptr, bdp_dma);
258976f31e8bSClaudiu Manoil 	}
2590ec21e2ecSJeff Kirsher 
2591ec21e2ecSJeff Kirsher 	return howmany;
2592ec21e2ecSJeff Kirsher }
2593ec21e2ecSJeff Kirsher 
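/* NAPI Rx poll for single-queue groups: ack IEVENT, clean the Rx ring
 * up to the budget, and only if the budget was not exhausted complete
 * NAPI and re-enable the Rx interrupt sources in IMASK.
 */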
2594aeb12c5eSClaudiu Manoil static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
25955eaedf31SClaudiu Manoil {
25965eaedf31SClaudiu Manoil 	struct gfar_priv_grp *gfargrp =
2597aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_rx);
25985eaedf31SClaudiu Manoil 	struct gfar __iomem *regs = gfargrp->regs;
259971ff9e3dSClaudiu Manoil 	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
26005eaedf31SClaudiu Manoil 	int work_done = 0;
26015eaedf31SClaudiu Manoil 
26025eaedf31SClaudiu Manoil 	/* Clear IEVENT, so interrupts aren't called again
26035eaedf31SClaudiu Manoil 	 * because of the packets that have already arrived
26045eaedf31SClaudiu Manoil 	 */
2605aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
26065eaedf31SClaudiu Manoil 
26075eaedf31SClaudiu Manoil 	work_done = gfar_clean_rx_ring(rx_queue, budget);
26085eaedf31SClaudiu Manoil 
26095eaedf31SClaudiu Manoil 	if (work_done < budget) {
2610aeb12c5eSClaudiu Manoil 		u32 imask;
26116ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
26125eaedf31SClaudiu Manoil 		/* Clear the halt bit in RSTAT */
26135eaedf31SClaudiu Manoil 		gfar_write(&regs->rstat, gfargrp->rstat);
26145eaedf31SClaudiu Manoil 
2615aeb12c5eSClaudiu Manoil 		spin_lock_irq(&gfargrp->grplock);
2616aeb12c5eSClaudiu Manoil 		imask = gfar_read(&regs->imask);
2617aeb12c5eSClaudiu Manoil 		imask |= IMASK_RX_DEFAULT;
2618aeb12c5eSClaudiu Manoil 		gfar_write(&regs->imask, imask);
2619aeb12c5eSClaudiu Manoil 		spin_unlock_irq(&gfargrp->grplock);
26205eaedf31SClaudiu Manoil 	}
26215eaedf31SClaudiu Manoil 
26225eaedf31SClaudiu Manoil 	return work_done;
26235eaedf31SClaudiu Manoil }
26245eaedf31SClaudiu Manoil 
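/* NAPI Tx poll: Tx cleanup runs to completion rather than against the
 * budget, so the poll always completes NAPI, re-enables Tx interrupts
 * and returns 0.
 */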
2625aeb12c5eSClaudiu Manoil static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2626ec21e2ecSJeff Kirsher {
2627bc4598bcSJan Ceuleers 	struct gfar_priv_grp *gfargrp =
2628aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_tx);
2629aeb12c5eSClaudiu Manoil 	struct gfar __iomem *regs = gfargrp->regs;
263071ff9e3dSClaudiu Manoil 	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2631aeb12c5eSClaudiu Manoil 	u32 imask;
2632aeb12c5eSClaudiu Manoil 
2633aeb12c5eSClaudiu Manoil 	/* Clear IEVENT, so interrupts aren't called again
2634aeb12c5eSClaudiu Manoil 	 * because of the packets that have already arrived
2635aeb12c5eSClaudiu Manoil 	 */
2636aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2637aeb12c5eSClaudiu Manoil 
2638aeb12c5eSClaudiu Manoil 	/* run Tx cleanup to completion */
2639aeb12c5eSClaudiu Manoil 	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2640aeb12c5eSClaudiu Manoil 		gfar_clean_tx_ring(tx_queue);
2641aeb12c5eSClaudiu Manoil 
2642aeb12c5eSClaudiu Manoil 	napi_complete(napi);
2643aeb12c5eSClaudiu Manoil 
2644aeb12c5eSClaudiu Manoil 	spin_lock_irq(&gfargrp->grplock);
2645aeb12c5eSClaudiu Manoil 	imask = gfar_read(&regs->imask);
2646aeb12c5eSClaudiu Manoil 	imask |= IMASK_TX_DEFAULT;
2647aeb12c5eSClaudiu Manoil 	gfar_write(&regs->imask, imask);
2648aeb12c5eSClaudiu Manoil 	spin_unlock_irq(&gfargrp->grplock);
2649aeb12c5eSClaudiu Manoil 
2650aeb12c5eSClaudiu Manoil 	return 0;
2651aeb12c5eSClaudiu Manoil }
2652aeb12c5eSClaudiu Manoil 
26537d993c5fSArseny Solokha /* GFAR error interrupt handler */
26547d993c5fSArseny Solokha static irqreturn_t gfar_error(int irq, void *grp_id)
26557d993c5fSArseny Solokha {
26567d993c5fSArseny Solokha 	struct gfar_priv_grp *gfargrp = grp_id;
26577d993c5fSArseny Solokha 	struct gfar __iomem *regs = gfargrp->regs;
26587d993c5fSArseny Solokha 	struct gfar_private *priv = gfargrp->priv;
26597d993c5fSArseny Solokha 	struct net_device *dev = priv->ndev;
26607d993c5fSArseny Solokha 
26617d993c5fSArseny Solokha 	/* Save ievent for future reference */
26627d993c5fSArseny Solokha 	u32 events = gfar_read(&regs->ievent);
26637d993c5fSArseny Solokha 
26647d993c5fSArseny Solokha 	/* Clear IEVENT */
26657d993c5fSArseny Solokha 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
26667d993c5fSArseny Solokha 
26677d993c5fSArseny Solokha 	/* Magic Packet is not an error. */
26687d993c5fSArseny Solokha 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
26697d993c5fSArseny Solokha 	    (events & IEVENT_MAG))
26707d993c5fSArseny Solokha 		events &= ~IEVENT_MAG;
26717d993c5fSArseny Solokha 
26727d993c5fSArseny Solokha 	/* Log the error details if error messages are enabled */
26737d993c5fSArseny Solokha 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
26747d993c5fSArseny Solokha 		netdev_dbg(dev,
26757d993c5fSArseny Solokha 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
26767d993c5fSArseny Solokha 			   events, gfar_read(&regs->imask));
26777d993c5fSArseny Solokha 
26787d993c5fSArseny Solokha 	/* Update the error counters */
26797d993c5fSArseny Solokha 	if (events & IEVENT_TXE) {
26807d993c5fSArseny Solokha 		dev->stats.tx_errors++;
26817d993c5fSArseny Solokha 
26827d993c5fSArseny Solokha 		if (events & IEVENT_LC)
26837d993c5fSArseny Solokha 			dev->stats.tx_window_errors++;
26847d993c5fSArseny Solokha 		if (events & IEVENT_CRL)
26857d993c5fSArseny Solokha 			dev->stats.tx_aborted_errors++;
26867d993c5fSArseny Solokha 		if (events & IEVENT_XFUN) {
26877d993c5fSArseny Solokha 			netif_dbg(priv, tx_err, dev,
26887d993c5fSArseny Solokha 				  "TX FIFO underrun, packet dropped\n");
26897d993c5fSArseny Solokha 			dev->stats.tx_dropped++;
26907d993c5fSArseny Solokha 			atomic64_inc(&priv->extra_stats.tx_underrun);
26917d993c5fSArseny Solokha 
26927d993c5fSArseny Solokha 			schedule_work(&priv->reset_task);
26937d993c5fSArseny Solokha 		}
26947d993c5fSArseny Solokha 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
26957d993c5fSArseny Solokha 	}
26967d993c5fSArseny Solokha 	if (events & IEVENT_BSY) {
26977d993c5fSArseny Solokha 		dev->stats.rx_over_errors++;
26987d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.rx_bsy);
26997d993c5fSArseny Solokha 
27007d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
27017d993c5fSArseny Solokha 			  gfar_read(&regs->rstat));
27027d993c5fSArseny Solokha 	}
27037d993c5fSArseny Solokha 	if (events & IEVENT_BABR) {
27047d993c5fSArseny Solokha 		dev->stats.rx_errors++;
27057d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.rx_babr);
27067d993c5fSArseny Solokha 
27077d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
27087d993c5fSArseny Solokha 	}
27097d993c5fSArseny Solokha 	if (events & IEVENT_EBERR) {
27107d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.eberr);
27117d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "bus error\n");
27127d993c5fSArseny Solokha 	}
27137d993c5fSArseny Solokha 	if (events & IEVENT_RXC)
27147d993c5fSArseny Solokha 		netif_dbg(priv, rx_status, dev, "control frame\n");
27157d993c5fSArseny Solokha 
27167d993c5fSArseny Solokha 	if (events & IEVENT_BABT) {
27177d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.tx_babt);
27187d993c5fSArseny Solokha 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
27197d993c5fSArseny Solokha 	}
27207d993c5fSArseny Solokha 	return IRQ_HANDLED;
27217d993c5fSArseny Solokha }
27227d993c5fSArseny Solokha 
27237d993c5fSArseny Solokha /* The interrupt handler for devices with one interrupt */
27247d993c5fSArseny Solokha static irqreturn_t gfar_interrupt(int irq, void *grp_id)
27257d993c5fSArseny Solokha {
27267d993c5fSArseny Solokha 	struct gfar_priv_grp *gfargrp = grp_id;
27277d993c5fSArseny Solokha 
27287d993c5fSArseny Solokha 	/* Save ievent for future reference */
27297d993c5fSArseny Solokha 	u32 events = gfar_read(&gfargrp->regs->ievent);
27307d993c5fSArseny Solokha 
27317d993c5fSArseny Solokha 	/* Check for reception */
27327d993c5fSArseny Solokha 	if (events & IEVENT_RX_MASK)
27337d993c5fSArseny Solokha 		gfar_receive(irq, grp_id);
27347d993c5fSArseny Solokha 
27357d993c5fSArseny Solokha 	/* Check for transmit completion */
27367d993c5fSArseny Solokha 	if (events & IEVENT_TX_MASK)
27377d993c5fSArseny Solokha 		gfar_transmit(irq, grp_id);
27387d993c5fSArseny Solokha 
27397d993c5fSArseny Solokha 	/* Check for errors */
27407d993c5fSArseny Solokha 	if (events & IEVENT_ERR_MASK)
27417d993c5fSArseny Solokha 		gfar_error(irq, grp_id);
27427d993c5fSArseny Solokha 
27437d993c5fSArseny Solokha 	return IRQ_HANDLED;
27447d993c5fSArseny Solokha }
2745aeb12c5eSClaudiu Manoil 
2746ec21e2ecSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
27470977f817SJan Ceuleers /* Polling 'interrupt' - used by things like netconsole to send skbs
2748ec21e2ecSJeff Kirsher  * without having to re-enable interrupts. It's not called while
2749ec21e2ecSJeff Kirsher  * the interrupt routine is executing.
2750ec21e2ecSJeff Kirsher  */
2751ec21e2ecSJeff Kirsher static void gfar_netpoll(struct net_device *dev)
2752ec21e2ecSJeff Kirsher {
2753ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
27543a2e16c8SJan Ceuleers 	int i;
2755ec21e2ecSJeff Kirsher 
2756ec21e2ecSJeff Kirsher 	/* If the device has multiple interrupts, run tx/rx */
2757ec21e2ecSJeff Kirsher 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2758ec21e2ecSJeff Kirsher 		for (i = 0; i < priv->num_grps; i++) {
275962ed839dSPaul Gortmaker 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
276062ed839dSPaul Gortmaker 
276162ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, TX)->irq);
276262ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, RX)->irq);
276362ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, ER)->irq);
276462ed839dSPaul Gortmaker 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
276562ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, ER)->irq);
276662ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, RX)->irq);
276762ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, TX)->irq);
2768ec21e2ecSJeff Kirsher 		}
2769ec21e2ecSJeff Kirsher 	} else {
2770ec21e2ecSJeff Kirsher 		for (i = 0; i < priv->num_grps; i++) {
277162ed839dSPaul Gortmaker 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
277262ed839dSPaul Gortmaker 
277362ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, TX)->irq);
277462ed839dSPaul Gortmaker 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
277562ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, TX)->irq);
2776ec21e2ecSJeff Kirsher 		}
2777ec21e2ecSJeff Kirsher 	}
2778ec21e2ecSJeff Kirsher }
2779ec21e2ecSJeff Kirsher #endif
2780ec21e2ecSJeff Kirsher 
27817d993c5fSArseny Solokha static void free_grp_irqs(struct gfar_priv_grp *grp)
2782ec21e2ecSJeff Kirsher {
27837d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, TX)->irq, grp);
27847d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, RX)->irq, grp);
27857d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, ER)->irq, grp);
2786ec21e2ecSJeff Kirsher }
2787ec21e2ecSJeff Kirsher 
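/* Request the IRQs of one interrupt group: devices with split
 * interrupts get separate Error/Tx/Rx handlers, single-interrupt
 * devices get the combined gfar_interrupt() handler.  On failure the
 * lines already requested are freed in reverse order.
 */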
27887d993c5fSArseny Solokha static int register_grp_irqs(struct gfar_priv_grp *grp)
27897d993c5fSArseny Solokha {
27907d993c5fSArseny Solokha 	struct gfar_private *priv = grp->priv;
27917d993c5fSArseny Solokha 	struct net_device *dev = priv->ndev;
27927d993c5fSArseny Solokha 	int err;
27937d993c5fSArseny Solokha 
27947d993c5fSArseny Solokha 	/* If the device has multiple interrupts, register for
27957d993c5fSArseny Solokha 	 * them.  Otherwise, register only the one.
2796ec21e2ecSJeff Kirsher 	 */
27977d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
27987d993c5fSArseny Solokha 		/* Install our interrupt handlers for Error,
27997d993c5fSArseny Solokha 		 * Transmit, and Receive
28007d993c5fSArseny Solokha 		 */
28017d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
28027d993c5fSArseny Solokha 				  gfar_irq(grp, ER)->name, grp);
28037d993c5fSArseny Solokha 		if (err < 0) {
28047d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
28057d993c5fSArseny Solokha 				  gfar_irq(grp, ER)->irq);
28067d993c5fSArseny Solokha 
28077d993c5fSArseny Solokha 			goto err_irq_fail;
28087d993c5fSArseny Solokha 		}
28097d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, ER)->irq);
28107d993c5fSArseny Solokha 
28117d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
28127d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->name, grp);
28137d993c5fSArseny Solokha 		if (err < 0) {
28147d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
28157d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->irq);
28167d993c5fSArseny Solokha 			goto tx_irq_fail;
28177d993c5fSArseny Solokha 		}
28187d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
28197d993c5fSArseny Solokha 				  gfar_irq(grp, RX)->name, grp);
28207d993c5fSArseny Solokha 		if (err < 0) {
28217d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
28227d993c5fSArseny Solokha 				  gfar_irq(grp, RX)->irq);
28237d993c5fSArseny Solokha 			goto rx_irq_fail;
28247d993c5fSArseny Solokha 		}
28257d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, RX)->irq);
28267d993c5fSArseny Solokha 
28277d993c5fSArseny Solokha 	} else {
28287d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
28297d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->name, grp);
28307d993c5fSArseny Solokha 		if (err < 0) {
28317d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
28327d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->irq);
28337d993c5fSArseny Solokha 			goto err_irq_fail;
28347d993c5fSArseny Solokha 		}
28357d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, TX)->irq);
28367d993c5fSArseny Solokha 	}
28377d993c5fSArseny Solokha 
28387d993c5fSArseny Solokha 	return 0;
28397d993c5fSArseny Solokha 
28407d993c5fSArseny Solokha rx_irq_fail:
28417d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, TX)->irq, grp);
28427d993c5fSArseny Solokha tx_irq_fail:
28437d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, ER)->irq, grp);
28447d993c5fSArseny Solokha err_irq_fail:
28457d993c5fSArseny Solokha 	return err;
28467d993c5fSArseny Solokha 
28477d993c5fSArseny Solokha }
28487d993c5fSArseny Solokha 
28497d993c5fSArseny Solokha static void gfar_free_irq(struct gfar_private *priv)
28507d993c5fSArseny Solokha {
28517d993c5fSArseny Solokha 	int i;
28527d993c5fSArseny Solokha 
28537d993c5fSArseny Solokha 	/* Free the IRQs */
28547d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
28557d993c5fSArseny Solokha 		for (i = 0; i < priv->num_grps; i++)
28567d993c5fSArseny Solokha 			free_grp_irqs(&priv->gfargrp[i]);
28577d993c5fSArseny Solokha 	} else {
28587d993c5fSArseny Solokha 		for (i = 0; i < priv->num_grps; i++)
28597d993c5fSArseny Solokha 			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
28607d993c5fSArseny Solokha 				 &priv->gfargrp[i]);
28617d993c5fSArseny Solokha 	}
28627d993c5fSArseny Solokha }
28637d993c5fSArseny Solokha 
28647d993c5fSArseny Solokha static int gfar_request_irq(struct gfar_private *priv)
28657d993c5fSArseny Solokha {
28667d993c5fSArseny Solokha 	int err, i, j;
28677d993c5fSArseny Solokha 
28687d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
28697d993c5fSArseny Solokha 		err = register_grp_irqs(&priv->gfargrp[i]);
28707d993c5fSArseny Solokha 		if (err) {
28717d993c5fSArseny Solokha 			for (j = 0; j < i; j++)
28727d993c5fSArseny Solokha 				free_grp_irqs(&priv->gfargrp[j]);
28737d993c5fSArseny Solokha 			return err;
28747d993c5fSArseny Solokha 		}
28757d993c5fSArseny Solokha 	}
28767d993c5fSArseny Solokha 
28777d993c5fSArseny Solokha 	return 0;
28787d993c5fSArseny Solokha }
28797d993c5fSArseny Solokha 
28807d993c5fSArseny Solokha /* Called when something needs to use the ethernet device
28817d993c5fSArseny Solokha  * Returns 0 for success.
28827d993c5fSArseny Solokha  */
28837d993c5fSArseny Solokha static int gfar_enet_open(struct net_device *dev)
2884ec21e2ecSJeff Kirsher {
2885ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
28867d993c5fSArseny Solokha 	int err;
2887ec21e2ecSJeff Kirsher 
28887d993c5fSArseny Solokha 	err = init_phy(dev);
28897d993c5fSArseny Solokha 	if (err)
28907d993c5fSArseny Solokha 		return err;
28917d993c5fSArseny Solokha 
28927d993c5fSArseny Solokha 	err = gfar_request_irq(priv);
28937d993c5fSArseny Solokha 	if (err)
28947d993c5fSArseny Solokha 		return err;
28957d993c5fSArseny Solokha 
28967d993c5fSArseny Solokha 	err = startup_gfar(dev);
28977d993c5fSArseny Solokha 	if (err)
28987d993c5fSArseny Solokha 		return err;
28997d993c5fSArseny Solokha 
29007d993c5fSArseny Solokha 	return err;
29017d993c5fSArseny Solokha }
29027d993c5fSArseny Solokha 
29037d993c5fSArseny Solokha /* Stops the kernel queue, and halts the controller */
29047d993c5fSArseny Solokha static int gfar_close(struct net_device *dev)
29057d993c5fSArseny Solokha {
29067d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
29077d993c5fSArseny Solokha 
29087d993c5fSArseny Solokha 	cancel_work_sync(&priv->reset_task);
29097d993c5fSArseny Solokha 	stop_gfar(dev);
29107d993c5fSArseny Solokha 
29117d993c5fSArseny Solokha 	/* Disconnect from the PHY */
29127d993c5fSArseny Solokha 	phy_disconnect(dev->phydev);
29137d993c5fSArseny Solokha 
29147d993c5fSArseny Solokha 	gfar_free_irq(priv);
29157d993c5fSArseny Solokha 
29167d993c5fSArseny Solokha 	return 0;
29177d993c5fSArseny Solokha }
29187d993c5fSArseny Solokha 
29197d993c5fSArseny Solokha /* Clears each of the exact match registers to zero, so they
29207d993c5fSArseny Solokha  * don't interfere with normal reception
29217d993c5fSArseny Solokha  */
29227d993c5fSArseny Solokha static void gfar_clear_exact_match(struct net_device *dev)
29237d993c5fSArseny Solokha {
29247d993c5fSArseny Solokha 	int idx;
29257d993c5fSArseny Solokha 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
29267d993c5fSArseny Solokha 
29277d993c5fSArseny Solokha 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
29287d993c5fSArseny Solokha 		gfar_set_mac_for_addr(dev, idx, zero_arr);
2929ec21e2ecSJeff Kirsher }
2930ec21e2ecSJeff Kirsher 
2931ec21e2ecSJeff Kirsher /* Update the hash table based on the current list of multicast
2932ec21e2ecSJeff Kirsher  * addresses we subscribe to.  Also, change the promiscuity of
2933ec21e2ecSJeff Kirsher  * the device based on the flags (this function is called
29340977f817SJan Ceuleers  * whenever dev->flags is changed
29350977f817SJan Ceuleers  */
2936ec21e2ecSJeff Kirsher static void gfar_set_multi(struct net_device *dev)
2937ec21e2ecSJeff Kirsher {
2938ec21e2ecSJeff Kirsher 	struct netdev_hw_addr *ha;
2939ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2940ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
2941ec21e2ecSJeff Kirsher 	u32 tempval;
2942ec21e2ecSJeff Kirsher 
2943ec21e2ecSJeff Kirsher 	if (dev->flags & IFF_PROMISC) {
2944ec21e2ecSJeff Kirsher 		/* Set RCTRL to PROM */
2945ec21e2ecSJeff Kirsher 		tempval = gfar_read(&regs->rctrl);
2946ec21e2ecSJeff Kirsher 		tempval |= RCTRL_PROM;
2947ec21e2ecSJeff Kirsher 		gfar_write(&regs->rctrl, tempval);
2948ec21e2ecSJeff Kirsher 	} else {
2949ec21e2ecSJeff Kirsher 		/* Set RCTRL to not PROM */
2950ec21e2ecSJeff Kirsher 		tempval = gfar_read(&regs->rctrl);
2951ec21e2ecSJeff Kirsher 		tempval &= ~(RCTRL_PROM);
2952ec21e2ecSJeff Kirsher 		gfar_write(&regs->rctrl, tempval);
2953ec21e2ecSJeff Kirsher 	}
2954ec21e2ecSJeff Kirsher 
2955ec21e2ecSJeff Kirsher 	if (dev->flags & IFF_ALLMULTI) {
2956ec21e2ecSJeff Kirsher 		/* Set the hash to rx all multicast frames */
2957ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr0, 0xffffffff);
2958ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr1, 0xffffffff);
2959ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr2, 0xffffffff);
2960ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr3, 0xffffffff);
2961ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr4, 0xffffffff);
2962ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr5, 0xffffffff);
2963ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr6, 0xffffffff);
2964ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr7, 0xffffffff);
2965ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr0, 0xffffffff);
2966ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr1, 0xffffffff);
2967ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr2, 0xffffffff);
2968ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr3, 0xffffffff);
2969ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr4, 0xffffffff);
2970ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr5, 0xffffffff);
2971ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr6, 0xffffffff);
2972ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr7, 0xffffffff);
2973ec21e2ecSJeff Kirsher 	} else {
2974ec21e2ecSJeff Kirsher 		int em_num;
2975ec21e2ecSJeff Kirsher 		int idx;
2976ec21e2ecSJeff Kirsher 
2977ec21e2ecSJeff Kirsher 		/* zero out the hash */
2978ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr0, 0x0);
2979ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr1, 0x0);
2980ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr2, 0x0);
2981ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr3, 0x0);
2982ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr4, 0x0);
2983ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr5, 0x0);
2984ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr6, 0x0);
2985ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr7, 0x0);
2986ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr0, 0x0);
2987ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr1, 0x0);
2988ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr2, 0x0);
2989ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr3, 0x0);
2990ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr4, 0x0);
2991ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr5, 0x0);
2992ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr6, 0x0);
2993ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr7, 0x0);
2994ec21e2ecSJeff Kirsher 
2995ec21e2ecSJeff Kirsher 		/* If we have extended hash tables, we need to
2996ec21e2ecSJeff Kirsher 		 * clear the exact match registers to prepare for
29970977f817SJan Ceuleers 		 * setting them
29980977f817SJan Ceuleers 		 */
2999ec21e2ecSJeff Kirsher 		if (priv->extended_hash) {
3000ec21e2ecSJeff Kirsher 			em_num = GFAR_EM_NUM + 1;
3001ec21e2ecSJeff Kirsher 			gfar_clear_exact_match(dev);
3002ec21e2ecSJeff Kirsher 			idx = 1;
3003ec21e2ecSJeff Kirsher 		} else {
3004ec21e2ecSJeff Kirsher 			idx = 0;
3005ec21e2ecSJeff Kirsher 			em_num = 0;
3006ec21e2ecSJeff Kirsher 		}
3007ec21e2ecSJeff Kirsher 
3008ec21e2ecSJeff Kirsher 		if (netdev_mc_empty(dev))
3009ec21e2ecSJeff Kirsher 			return;
3010ec21e2ecSJeff Kirsher 
3011ec21e2ecSJeff Kirsher 		/* Parse the list, and set the appropriate bits */
3012ec21e2ecSJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
3013ec21e2ecSJeff Kirsher 			if (idx < em_num) {
3014ec21e2ecSJeff Kirsher 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3015ec21e2ecSJeff Kirsher 				idx++;
3016ec21e2ecSJeff Kirsher 			} else
3017ec21e2ecSJeff Kirsher 				gfar_set_hash_for_addr(dev, ha->addr);
3018ec21e2ecSJeff Kirsher 		}
3019ec21e2ecSJeff Kirsher 	}
3020ec21e2ecSJeff Kirsher }
3021ec21e2ecSJeff Kirsher 
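/* Soft-reset the MAC and reprogram it from scratch: assert
 * MACCFG1[SOFT_RESET], wait at least 3 Tx clocks, release it, then
 * restore the frame length limits, Rx offloads, hash and exact-match
 * filters, station address and interrupt coalescing settings.
 */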
30227d993c5fSArseny Solokha void gfar_mac_reset(struct gfar_private *priv)
30236ce29b0eSClaudiu Manoil {
30246ce29b0eSClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
30257d993c5fSArseny Solokha 	u32 tempval;
30266ce29b0eSClaudiu Manoil 
30277d993c5fSArseny Solokha 	/* Reset MAC layer */
30287d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
30296ce29b0eSClaudiu Manoil 
30307d993c5fSArseny Solokha 	/* We need to delay at least 3 TX clocks */
30317d993c5fSArseny Solokha 	udelay(3);
30326ce29b0eSClaudiu Manoil 
30337d993c5fSArseny Solokha 	/* the soft reset bit is not self-resetting, so we need to
30347d993c5fSArseny Solokha 	 * clear it before resuming normal operation
30356ce29b0eSClaudiu Manoil 	 */
30367d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, 0);
30376ce29b0eSClaudiu Manoil 
30387d993c5fSArseny Solokha 	udelay(3);
30396ce29b0eSClaudiu Manoil 
30407d993c5fSArseny Solokha 	gfar_rx_offload_en(priv);
30416ce29b0eSClaudiu Manoil 
30427d993c5fSArseny Solokha 	/* Initialize the max receive frame/buffer lengths */
30437d993c5fSArseny Solokha 	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
30447d993c5fSArseny Solokha 	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3045b4b67f26SScott Wood 
30467d993c5fSArseny Solokha 	/* Initialize the Minimum Frame Length Register */
30477d993c5fSArseny Solokha 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
304845b679c9SMatei Pavaluca 
30497d993c5fSArseny Solokha 	/* Initialize MACCFG2. */
30507d993c5fSArseny Solokha 	tempval = MACCFG2_INIT_SETTINGS;
305145b679c9SMatei Pavaluca 
30527d993c5fSArseny Solokha 	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
30537d993c5fSArseny Solokha 	 * are marked as truncated.  Avoid this by setting MACCFG2[Huge Frame]=1,
30547d993c5fSArseny Solokha 	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
30557d993c5fSArseny Solokha 	 */
30567d993c5fSArseny Solokha 	if (gfar_has_errata(priv, GFAR_ERRATA_74))
30577d993c5fSArseny Solokha 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
305845b679c9SMatei Pavaluca 
30596ce29b0eSClaudiu Manoil 	gfar_write(&regs->maccfg2, tempval);
30606ce29b0eSClaudiu Manoil 
30617d993c5fSArseny Solokha 	/* Clear mac addr hash registers */
30627d993c5fSArseny Solokha 	gfar_write(&regs->igaddr0, 0);
30637d993c5fSArseny Solokha 	gfar_write(&regs->igaddr1, 0);
30647d993c5fSArseny Solokha 	gfar_write(&regs->igaddr2, 0);
30657d993c5fSArseny Solokha 	gfar_write(&regs->igaddr3, 0);
30667d993c5fSArseny Solokha 	gfar_write(&regs->igaddr4, 0);
30677d993c5fSArseny Solokha 	gfar_write(&regs->igaddr5, 0);
30687d993c5fSArseny Solokha 	gfar_write(&regs->igaddr6, 0);
30697d993c5fSArseny Solokha 	gfar_write(&regs->igaddr7, 0);
30706ce29b0eSClaudiu Manoil 
30717d993c5fSArseny Solokha 	gfar_write(&regs->gaddr0, 0);
30727d993c5fSArseny Solokha 	gfar_write(&regs->gaddr1, 0);
30737d993c5fSArseny Solokha 	gfar_write(&regs->gaddr2, 0);
30747d993c5fSArseny Solokha 	gfar_write(&regs->gaddr3, 0);
30757d993c5fSArseny Solokha 	gfar_write(&regs->gaddr4, 0);
30767d993c5fSArseny Solokha 	gfar_write(&regs->gaddr5, 0);
30777d993c5fSArseny Solokha 	gfar_write(&regs->gaddr6, 0);
30787d993c5fSArseny Solokha 	gfar_write(&regs->gaddr7, 0);
30797d993c5fSArseny Solokha 
30807d993c5fSArseny Solokha 	if (priv->extended_hash)
30817d993c5fSArseny Solokha 		gfar_clear_exact_match(priv->ndev);
30827d993c5fSArseny Solokha 
30837d993c5fSArseny Solokha 	gfar_mac_rx_config(priv);
30847d993c5fSArseny Solokha 
30857d993c5fSArseny Solokha 	gfar_mac_tx_config(priv);
30867d993c5fSArseny Solokha 
30877d993c5fSArseny Solokha 	gfar_set_mac_address(priv->ndev);
30887d993c5fSArseny Solokha 
30897d993c5fSArseny Solokha 	gfar_set_multi(priv->ndev);
30907d993c5fSArseny Solokha 
30917d993c5fSArseny Solokha 	/* clear ievent and imask before configuring coalescing */
30927d993c5fSArseny Solokha 	gfar_ints_disable(priv);
30937d993c5fSArseny Solokha 
30947d993c5fSArseny Solokha 	/* Configure the coalescing support */
30957d993c5fSArseny Solokha 	gfar_configure_coalescing_all(priv);
30967d993c5fSArseny Solokha }
30977d993c5fSArseny Solokha 
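/* One-time controller bring-up at probe: halt any DMA the firmware may
 * have left running, reset the MAC, clear the RMON MIB counters if the
 * device has them, and program ECNTRL, the stashing attributes, FIFO
 * thresholds and, for multi-group devices, the interrupt steering
 * registers.
 */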
30987d993c5fSArseny Solokha static void gfar_hw_init(struct gfar_private *priv)
30997d993c5fSArseny Solokha {
31007d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
31017d993c5fSArseny Solokha 	u32 attrs;
31027d993c5fSArseny Solokha 
31037d993c5fSArseny Solokha 	/* Stop the DMA engine now, in case it was running before
31047d993c5fSArseny Solokha 	 * (The firmware could have used it, and left it running).
31057d993c5fSArseny Solokha 	 */
31067d993c5fSArseny Solokha 	gfar_halt(priv);
31077d993c5fSArseny Solokha 
31087d993c5fSArseny Solokha 	gfar_mac_reset(priv);
31097d993c5fSArseny Solokha 
31107d993c5fSArseny Solokha 	/* Zero out the rmon mib registers if it has them */
31117d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
31127d993c5fSArseny Solokha 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
31137d993c5fSArseny Solokha 
31147d993c5fSArseny Solokha 		/* Mask off the CAM interrupts */
31157d993c5fSArseny Solokha 		gfar_write(&regs->rmon.cam1, 0xffffffff);
31167d993c5fSArseny Solokha 		gfar_write(&regs->rmon.cam2, 0xffffffff);
31177d993c5fSArseny Solokha 	}
31187d993c5fSArseny Solokha 
31197d993c5fSArseny Solokha 	/* Initialize ECNTRL */
31207d993c5fSArseny Solokha 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
31217d993c5fSArseny Solokha 
31227d993c5fSArseny Solokha 	/* Set the extraction length and index */
31237d993c5fSArseny Solokha 	attrs = ATTRELI_EL(priv->rx_stash_size) |
31247d993c5fSArseny Solokha 		ATTRELI_EI(priv->rx_stash_index);
31257d993c5fSArseny Solokha 
31267d993c5fSArseny Solokha 	gfar_write(&regs->attreli, attrs);
31277d993c5fSArseny Solokha 
31287d993c5fSArseny Solokha 	/* Start with defaults, and add stashing
31297d993c5fSArseny Solokha 	 * depending on driver parameters
31307d993c5fSArseny Solokha 	 */
31317d993c5fSArseny Solokha 	attrs = ATTR_INIT_SETTINGS;
31327d993c5fSArseny Solokha 
31337d993c5fSArseny Solokha 	if (priv->bd_stash_en)
31347d993c5fSArseny Solokha 		attrs |= ATTR_BDSTASH;
31357d993c5fSArseny Solokha 
31367d993c5fSArseny Solokha 	if (priv->rx_stash_size != 0)
31377d993c5fSArseny Solokha 		attrs |= ATTR_BUFSTASH;
31387d993c5fSArseny Solokha 
31397d993c5fSArseny Solokha 	gfar_write(&regs->attr, attrs);
31407d993c5fSArseny Solokha 
31417d993c5fSArseny Solokha 	/* FIFO configs */
31427d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
31437d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
31447d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
31457d993c5fSArseny Solokha 
31467d993c5fSArseny Solokha 	/* Program the interrupt steering regs, only for MG devices */
31477d993c5fSArseny Solokha 	if (priv->num_grps > 1)
31487d993c5fSArseny Solokha 		gfar_write_isrg(priv);
31497d993c5fSArseny Solokha }
31507d993c5fSArseny Solokha 
31517d993c5fSArseny Solokha static const struct net_device_ops gfar_netdev_ops = {
31527d993c5fSArseny Solokha 	.ndo_open = gfar_enet_open,
31537d993c5fSArseny Solokha 	.ndo_start_xmit = gfar_start_xmit,
31547d993c5fSArseny Solokha 	.ndo_stop = gfar_close,
31557d993c5fSArseny Solokha 	.ndo_change_mtu = gfar_change_mtu,
31567d993c5fSArseny Solokha 	.ndo_set_features = gfar_set_features,
31577d993c5fSArseny Solokha 	.ndo_set_rx_mode = gfar_set_multi,
31587d993c5fSArseny Solokha 	.ndo_tx_timeout = gfar_timeout,
31597d993c5fSArseny Solokha 	.ndo_do_ioctl = gfar_ioctl,
31607d993c5fSArseny Solokha 	.ndo_get_stats = gfar_get_stats,
31617d993c5fSArseny Solokha 	.ndo_change_carrier = fixed_phy_change_carrier,
31627d993c5fSArseny Solokha 	.ndo_set_mac_address = gfar_set_mac_addr,
31637d993c5fSArseny Solokha 	.ndo_validate_addr = eth_validate_addr,
31647d993c5fSArseny Solokha #ifdef CONFIG_NET_POLL_CONTROLLER
31657d993c5fSArseny Solokha 	.ndo_poll_controller = gfar_netpoll,
31667d993c5fSArseny Solokha #endif
31677d993c5fSArseny Solokha };
31687d993c5fSArseny Solokha 
31697d993c5fSArseny Solokha /* Set up the ethernet device structure, private data,
31707d993c5fSArseny Solokha  * and anything else we need before we start
31717d993c5fSArseny Solokha  */
31727d993c5fSArseny Solokha static int gfar_probe(struct platform_device *ofdev)
31737d993c5fSArseny Solokha {
31747d993c5fSArseny Solokha 	struct device_node *np = ofdev->dev.of_node;
31757d993c5fSArseny Solokha 	struct net_device *dev = NULL;
31767d993c5fSArseny Solokha 	struct gfar_private *priv = NULL;
31777d993c5fSArseny Solokha 	int err = 0, i;
31787d993c5fSArseny Solokha 
31797d993c5fSArseny Solokha 	err = gfar_of_init(ofdev, &dev);
31807d993c5fSArseny Solokha 
31817d993c5fSArseny Solokha 	if (err)
31827d993c5fSArseny Solokha 		return err;
31837d993c5fSArseny Solokha 
31847d993c5fSArseny Solokha 	priv = netdev_priv(dev);
31857d993c5fSArseny Solokha 	priv->ndev = dev;
31867d993c5fSArseny Solokha 	priv->ofdev = ofdev;
31877d993c5fSArseny Solokha 	priv->dev = &ofdev->dev;
31887d993c5fSArseny Solokha 	SET_NETDEV_DEV(dev, &ofdev->dev);
31897d993c5fSArseny Solokha 
31907d993c5fSArseny Solokha 	INIT_WORK(&priv->reset_task, gfar_reset_task);
31917d993c5fSArseny Solokha 
31927d993c5fSArseny Solokha 	platform_set_drvdata(ofdev, priv);
31937d993c5fSArseny Solokha 
31947d993c5fSArseny Solokha 	gfar_detect_errata(priv);
31957d993c5fSArseny Solokha 
31967d993c5fSArseny Solokha 	/* Set the dev->base_addr to the gfar reg region */
31977d993c5fSArseny Solokha 	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
31987d993c5fSArseny Solokha 
31997d993c5fSArseny Solokha 	/* Fill in the dev structure */
32007d993c5fSArseny Solokha 	dev->watchdog_timeo = TX_TIMEOUT;
32017d993c5fSArseny Solokha 	/* MTU range: 50 - 9586 */
32027d993c5fSArseny Solokha 	dev->mtu = 1500;
32037d993c5fSArseny Solokha 	dev->min_mtu = 50;
32047d993c5fSArseny Solokha 	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
32057d993c5fSArseny Solokha 	dev->netdev_ops = &gfar_netdev_ops;
32067d993c5fSArseny Solokha 	dev->ethtool_ops = &gfar_ethtool_ops;
32077d993c5fSArseny Solokha 
32087d993c5fSArseny Solokha 	/* Register NAPI for each interrupt group */
32097d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
32107d993c5fSArseny Solokha 		netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
32117d993c5fSArseny Solokha 			       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
32127d993c5fSArseny Solokha 		netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
32137d993c5fSArseny Solokha 				  gfar_poll_tx_sq, 2);
32147d993c5fSArseny Solokha 	}
32157d993c5fSArseny Solokha 
32167d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
32177d993c5fSArseny Solokha 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
32187d993c5fSArseny Solokha 				   NETIF_F_RXCSUM;
32197d993c5fSArseny Solokha 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
32207d993c5fSArseny Solokha 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
32217d993c5fSArseny Solokha 	}
32227d993c5fSArseny Solokha 
32237d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
32247d993c5fSArseny Solokha 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
32257d993c5fSArseny Solokha 				    NETIF_F_HW_VLAN_CTAG_RX;
32267d993c5fSArseny Solokha 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
32277d993c5fSArseny Solokha 	}
32287d993c5fSArseny Solokha 
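	/* The MAC address can be changed while the interface is running */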
32297d993c5fSArseny Solokha 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
32307d993c5fSArseny Solokha 
32317d993c5fSArseny Solokha 	gfar_init_addr_hash_table(priv);
32327d993c5fSArseny Solokha 
32337d993c5fSArseny Solokha 	/* Insert receive time stamps into the padding/alignment bytes,
32347d993c5fSArseny Solokha 	 * plus 2 bytes of padding to ensure CPU alignment.
32357d993c5fSArseny Solokha 	 */
32367d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
32377d993c5fSArseny Solokha 		priv->padding = 8 + DEFAULT_PADDING;
32387d993c5fSArseny Solokha 
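	/* TX checksum offload and HW timestamping make the driver prepend a
	 * Frame Control Block (plus padding/alignment bytes) to each frame,
	 * so reserve headroom for it.
	 */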
32397d993c5fSArseny Solokha 	if (dev->features & NETIF_F_IP_CSUM ||
32407d993c5fSArseny Solokha 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3241d6a076d6SClaudiu Manoil 		dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
32427d993c5fSArseny Solokha 
32437d993c5fSArseny Solokha 	/* Initializing some of the rx/tx queue level parameters */
32447d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
32457d993c5fSArseny Solokha 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
32467d993c5fSArseny Solokha 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
32477d993c5fSArseny Solokha 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
32487d993c5fSArseny Solokha 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
32497d993c5fSArseny Solokha 	}
32507d993c5fSArseny Solokha 
32517d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
32527d993c5fSArseny Solokha 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
32537d993c5fSArseny Solokha 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
32547d993c5fSArseny Solokha 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
32557d993c5fSArseny Solokha 	}
32567d993c5fSArseny Solokha 
32577d993c5fSArseny Solokha 	/* Always enable rx filer if available */
32587d993c5fSArseny Solokha 	priv->rx_filer_enable =
32597d993c5fSArseny Solokha 	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
32607d993c5fSArseny Solokha 	/* Enable all message types up to and including NETIF_MSG_IFUP by default */
32617d993c5fSArseny Solokha 	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
32627d993c5fSArseny Solokha 	/* use priority h/w tx queue scheduling for single queue devices */
32637d993c5fSArseny Solokha 	if (priv->num_tx_queues == 1)
32647d993c5fSArseny Solokha 		priv->prio_sched_en = 1;
32657d993c5fSArseny Solokha 
32667d993c5fSArseny Solokha 	set_bit(GFAR_DOWN, &priv->state);
32677d993c5fSArseny Solokha 
32687d993c5fSArseny Solokha 	gfar_hw_init(priv);
32697d993c5fSArseny Solokha 
32707d993c5fSArseny Solokha 	/* Carrier starts down, phylib will bring it up */
32717d993c5fSArseny Solokha 	netif_carrier_off(dev);
32727d993c5fSArseny Solokha 
32737d993c5fSArseny Solokha 	err = register_netdev(dev);
32747d993c5fSArseny Solokha 
32757d993c5fSArseny Solokha 	if (err) {
32767d993c5fSArseny Solokha 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
32777d993c5fSArseny Solokha 		goto register_fail;
32787d993c5fSArseny Solokha 	}
32797d993c5fSArseny Solokha 
32807d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
32817d993c5fSArseny Solokha 		priv->wol_supported |= GFAR_WOL_MAGIC;
32827d993c5fSArseny Solokha 
32837d993c5fSArseny Solokha 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
32847d993c5fSArseny Solokha 	    priv->rx_filer_enable)
32857d993c5fSArseny Solokha 		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
32867d993c5fSArseny Solokha 
32877d993c5fSArseny Solokha 	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
32887d993c5fSArseny Solokha 
32897d993c5fSArseny Solokha 	/* fill out IRQ number and name fields */
32907d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
32917d993c5fSArseny Solokha 		struct gfar_priv_grp *grp = &priv->gfargrp[i];
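		/* Multi-interrupt (MG) devices expose separate TX/RX/error IRQs
		 * per group, named "<dev>_g<N>_tx/_rx/_er"; single-interrupt
		 * devices reuse the device name for their one IRQ line.
		 */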
32927d993c5fSArseny Solokha 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
32937d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
32947d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_tx");
32957d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
32967d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_rx");
32977d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
32987d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_er");
32997d993c5fSArseny Solokha 		} else
33007d993c5fSArseny Solokha 			strcpy(gfar_irq(grp, TX)->name, dev->name);
33017d993c5fSArseny Solokha 	}
33027d993c5fSArseny Solokha 
33037d993c5fSArseny Solokha 	/* Initialize the filer table */
33047d993c5fSArseny Solokha 	gfar_init_filer_table(priv);
33057d993c5fSArseny Solokha 
33067d993c5fSArseny Solokha 	/* Print out the device info */
33077d993c5fSArseny Solokha 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
33087d993c5fSArseny Solokha 
33097d993c5fSArseny Solokha 	/* Even more device info helps when determining which kernel
33107d993c5fSArseny Solokha 	 * provided which set of benchmarks.
33117d993c5fSArseny Solokha 	 */
33127d993c5fSArseny Solokha 	netdev_info(dev, "Running with NAPI enabled\n");
33137d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++)
33147d993c5fSArseny Solokha 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
33157d993c5fSArseny Solokha 			    i, priv->rx_queue[i]->rx_ring_size);
33167d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++)
33177d993c5fSArseny Solokha 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
33187d993c5fSArseny Solokha 			    i, priv->tx_queue[i]->tx_ring_size);
33197d993c5fSArseny Solokha 
33207d993c5fSArseny Solokha 	return 0;
33217d993c5fSArseny Solokha 
33227d993c5fSArseny Solokha register_fail:
33237d993c5fSArseny Solokha 	if (of_phy_is_fixed_link(np))
33247d993c5fSArseny Solokha 		of_phy_deregister_fixed_link(np);
33257d993c5fSArseny Solokha 	unmap_group_regs(priv);
33267d993c5fSArseny Solokha 	gfar_free_rx_queues(priv);
33277d993c5fSArseny Solokha 	gfar_free_tx_queues(priv);
33287d993c5fSArseny Solokha 	of_node_put(priv->phy_node);
33297d993c5fSArseny Solokha 	of_node_put(priv->tbi_node);
33307d993c5fSArseny Solokha 	free_gfar_dev(priv);
33317d993c5fSArseny Solokha 	return err;
33327d993c5fSArseny Solokha }
33337d993c5fSArseny Solokha 
33347d993c5fSArseny Solokha static int gfar_remove(struct platform_device *ofdev)
33357d993c5fSArseny Solokha {
33367d993c5fSArseny Solokha 	struct gfar_private *priv = platform_get_drvdata(ofdev);
33377d993c5fSArseny Solokha 	struct device_node *np = ofdev->dev.of_node;
33387d993c5fSArseny Solokha 
33397d993c5fSArseny Solokha 	of_node_put(priv->phy_node);
33407d993c5fSArseny Solokha 	of_node_put(priv->tbi_node);
33417d993c5fSArseny Solokha 
33427d993c5fSArseny Solokha 	unregister_netdev(priv->ndev);
33437d993c5fSArseny Solokha 
33447d993c5fSArseny Solokha 	if (of_phy_is_fixed_link(np))
33457d993c5fSArseny Solokha 		of_phy_deregister_fixed_link(np);
33467d993c5fSArseny Solokha 
33477d993c5fSArseny Solokha 	unmap_group_regs(priv);
33487d993c5fSArseny Solokha 	gfar_free_rx_queues(priv);
33497d993c5fSArseny Solokha 	gfar_free_tx_queues(priv);
33507d993c5fSArseny Solokha 	free_gfar_dev(priv);
33517d993c5fSArseny Solokha 
33527d993c5fSArseny Solokha 	return 0;
33537d993c5fSArseny Solokha }
33547d993c5fSArseny Solokha 
33557d993c5fSArseny Solokha #ifdef CONFIG_PM
33567d993c5fSArseny Solokha 
33577d993c5fSArseny Solokha static void __gfar_filer_disable(struct gfar_private *priv)
33587d993c5fSArseny Solokha {
33597d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
33607d993c5fSArseny Solokha 	u32 temp;
33617d993c5fSArseny Solokha 
33627d993c5fSArseny Solokha 	temp = gfar_read(&regs->rctrl);
33637d993c5fSArseny Solokha 	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
33647d993c5fSArseny Solokha 	gfar_write(&regs->rctrl, temp);
33657d993c5fSArseny Solokha }
33667d993c5fSArseny Solokha 
33677d993c5fSArseny Solokha static void __gfar_filer_enable(struct gfar_private *priv)
33687d993c5fSArseny Solokha {
33697d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
33707d993c5fSArseny Solokha 	u32 temp;
33717d993c5fSArseny Solokha 
33727d993c5fSArseny Solokha 	temp = gfar_read(&regs->rctrl);
33737d993c5fSArseny Solokha 	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
33747d993c5fSArseny Solokha 	gfar_write(&regs->rctrl, temp);
33757d993c5fSArseny Solokha }
33767d993c5fSArseny Solokha 
33777d993c5fSArseny Solokha /* Filer rules implementing wol capabilities */
33787d993c5fSArseny Solokha static void gfar_filer_config_wol(struct gfar_private *priv)
33797d993c5fSArseny Solokha {
33807d993c5fSArseny Solokha 	unsigned int i;
33817d993c5fSArseny Solokha 	u32 rqfcr;
33827d993c5fSArseny Solokha 
33837d993c5fSArseny Solokha 	__gfar_filer_disable(priv);
33847d993c5fSArseny Solokha 
33857d993c5fSArseny Solokha 	/* clear the filer table, reject any packet by default */
33867d993c5fSArseny Solokha 	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
33877d993c5fSArseny Solokha 	for (i = 0; i <= MAX_FILER_IDX; i++)
33887d993c5fSArseny Solokha 		gfar_write_filer(priv, i, rqfcr, 0);
33897d993c5fSArseny Solokha 
33907d993c5fSArseny Solokha 	i = 0;
33917d993c5fSArseny Solokha 	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
33927d993c5fSArseny Solokha 		/* unicast packet, accept it */
33937d993c5fSArseny Solokha 		struct net_device *ndev = priv->ndev;
33947d993c5fSArseny Solokha 		/* get the default rx queue index */
33957d993c5fSArseny Solokha 		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
33967d993c5fSArseny Solokha 		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
33977d993c5fSArseny Solokha 				    (ndev->dev_addr[1] << 8) |
33987d993c5fSArseny Solokha 				     ndev->dev_addr[2];
33997d993c5fSArseny Solokha 
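		/* The destination MAC is matched in two filer rules: this one
		 * compares the upper three address bytes (DAH) and ANDs with
		 * the next rule, which compares the lower three bytes (DAL)
		 * and raises the filer general purpose interrupt on a match.
		 */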
34007d993c5fSArseny Solokha 		rqfcr = (qindex << 10) | RQFCR_AND |
34017d993c5fSArseny Solokha 			RQFCR_CMP_EXACT | RQFCR_PID_DAH;
34027d993c5fSArseny Solokha 
34037d993c5fSArseny Solokha 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
34047d993c5fSArseny Solokha 
34057d993c5fSArseny Solokha 		dest_mac_addr = (ndev->dev_addr[3] << 16) |
34067d993c5fSArseny Solokha 				(ndev->dev_addr[4] << 8) |
34077d993c5fSArseny Solokha 				 ndev->dev_addr[5];
34087d993c5fSArseny Solokha 		rqfcr = (qindex << 10) | RQFCR_GPI |
34097d993c5fSArseny Solokha 			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
34107d993c5fSArseny Solokha 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
34117d993c5fSArseny Solokha 	}
34127d993c5fSArseny Solokha 
34137d993c5fSArseny Solokha 	__gfar_filer_enable(priv);
34147d993c5fSArseny Solokha }
34157d993c5fSArseny Solokha 
34167d993c5fSArseny Solokha static void gfar_filer_restore_table(struct gfar_private *priv)
34177d993c5fSArseny Solokha {
34187d993c5fSArseny Solokha 	u32 rqfcr, rqfpr;
34197d993c5fSArseny Solokha 	unsigned int i;
34207d993c5fSArseny Solokha 
34217d993c5fSArseny Solokha 	__gfar_filer_disable(priv);
34227d993c5fSArseny Solokha 
34237d993c5fSArseny Solokha 	for (i = 0; i <= MAX_FILER_IDX; i++) {
34247d993c5fSArseny Solokha 		rqfcr = priv->ftp_rqfcr[i];
34257d993c5fSArseny Solokha 		rqfpr = priv->ftp_rqfpr[i];
34267d993c5fSArseny Solokha 		gfar_write_filer(priv, i, rqfcr, rqfpr);
34277d993c5fSArseny Solokha 	}
34287d993c5fSArseny Solokha 
34297d993c5fSArseny Solokha 	__gfar_filer_enable(priv);
34307d993c5fSArseny Solokha }
34317d993c5fSArseny Solokha 
34327d993c5fSArseny Solokha /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
34337d993c5fSArseny Solokha static void gfar_start_wol_filer(struct gfar_private *priv)
34347d993c5fSArseny Solokha {
34357d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
34367d993c5fSArseny Solokha 	u32 tempval;
34377d993c5fSArseny Solokha 	int i = 0;
34387d993c5fSArseny Solokha 
34397d993c5fSArseny Solokha 	/* Enable Rx hw queues */
34407d993c5fSArseny Solokha 	gfar_write(&regs->rqueue, priv->rqueue);
34417d993c5fSArseny Solokha 
34427d993c5fSArseny Solokha 	/* Initialize DMACTRL to have WWR and WOP */
34437d993c5fSArseny Solokha 	tempval = gfar_read(&regs->dmactrl);
34447d993c5fSArseny Solokha 	tempval |= DMACTRL_INIT_SETTINGS;
34457d993c5fSArseny Solokha 	gfar_write(&regs->dmactrl, tempval);
34467d993c5fSArseny Solokha 
34477d993c5fSArseny Solokha 	/* Make sure we aren't stopped */
34487d993c5fSArseny Solokha 	tempval = gfar_read(&regs->dmactrl);
34497d993c5fSArseny Solokha 	tempval &= ~DMACTRL_GRS;
34507d993c5fSArseny Solokha 	gfar_write(&regs->dmactrl, tempval);
34517d993c5fSArseny Solokha 
34527d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
34537d993c5fSArseny Solokha 		regs = priv->gfargrp[i].regs;
34547d993c5fSArseny Solokha 		/* Clear RHLT, so that the DMA starts polling now */
34557d993c5fSArseny Solokha 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
34567d993c5fSArseny Solokha 		/* enable the Filer General Purpose Interrupt */
34577d993c5fSArseny Solokha 		gfar_write(&regs->imask, IMASK_FGPI);
34587d993c5fSArseny Solokha 	}
34597d993c5fSArseny Solokha 
34607d993c5fSArseny Solokha 	/* Enable Rx DMA */
34617d993c5fSArseny Solokha 	tempval = gfar_read(&regs->maccfg1);
34627d993c5fSArseny Solokha 	tempval |= MACCFG1_RX_EN;
34637d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, tempval);
34647d993c5fSArseny Solokha }
34657d993c5fSArseny Solokha 
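/* Suspend handler: halt the controller and either arm magic-packet wake-up,
 * arm the filer for unicast wake-up frames, or stop the PHY entirely when
 * no wake-on-LAN option is requested.
 */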
34667d993c5fSArseny Solokha static int gfar_suspend(struct device *dev)
34677d993c5fSArseny Solokha {
34687d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
34697d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
34707d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
34717d993c5fSArseny Solokha 	u32 tempval;
34727d993c5fSArseny Solokha 	u16 wol = priv->wol_opts;
34737d993c5fSArseny Solokha 
34747d993c5fSArseny Solokha 	if (!netif_running(ndev))
34757d993c5fSArseny Solokha 		return 0;
34767d993c5fSArseny Solokha 
34777d993c5fSArseny Solokha 	disable_napi(priv);
34787d993c5fSArseny Solokha 	netif_tx_lock(ndev);
34797d993c5fSArseny Solokha 	netif_device_detach(ndev);
34807d993c5fSArseny Solokha 	netif_tx_unlock(ndev);
34817d993c5fSArseny Solokha 
34827d993c5fSArseny Solokha 	gfar_halt(priv);
34837d993c5fSArseny Solokha 
34847d993c5fSArseny Solokha 	if (wol & GFAR_WOL_MAGIC) {
34857d993c5fSArseny Solokha 		/* Enable interrupt on Magic Packet */
34867d993c5fSArseny Solokha 		gfar_write(&regs->imask, IMASK_MAG);
34877d993c5fSArseny Solokha 
34887d993c5fSArseny Solokha 		/* Enable Magic Packet mode */
34897d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg2);
34907d993c5fSArseny Solokha 		tempval |= MACCFG2_MPEN;
34917d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
34927d993c5fSArseny Solokha 
34937d993c5fSArseny Solokha 		/* re-enable the Rx block */
34947d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg1);
34957d993c5fSArseny Solokha 		tempval |= MACCFG1_RX_EN;
34967d993c5fSArseny Solokha 		gfar_write(&regs->maccfg1, tempval);
34977d993c5fSArseny Solokha 
34987d993c5fSArseny Solokha 	} else if (wol & GFAR_WOL_FILER_UCAST) {
34997d993c5fSArseny Solokha 		gfar_filer_config_wol(priv);
35007d993c5fSArseny Solokha 		gfar_start_wol_filer(priv);
35017d993c5fSArseny Solokha 
35027d993c5fSArseny Solokha 	} else {
35037d993c5fSArseny Solokha 		phy_stop(ndev->phydev);
35047d993c5fSArseny Solokha 	}
35057d993c5fSArseny Solokha 
35067d993c5fSArseny Solokha 	return 0;
35077d993c5fSArseny Solokha }
35087d993c5fSArseny Solokha 
35097d993c5fSArseny Solokha static int gfar_resume(struct device *dev)
35107d993c5fSArseny Solokha {
35117d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
35127d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
35137d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
35147d993c5fSArseny Solokha 	u32 tempval;
35157d993c5fSArseny Solokha 	u16 wol = priv->wol_opts;
35167d993c5fSArseny Solokha 
35177d993c5fSArseny Solokha 	if (!netif_running(ndev))
35187d993c5fSArseny Solokha 		return 0;
35197d993c5fSArseny Solokha 
35207d993c5fSArseny Solokha 	if (wol & GFAR_WOL_MAGIC) {
35217d993c5fSArseny Solokha 		/* Disable Magic Packet mode */
35227d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg2);
35237d993c5fSArseny Solokha 		tempval &= ~MACCFG2_MPEN;
35247d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
35257d993c5fSArseny Solokha 
35267d993c5fSArseny Solokha 	} else if (wol & GFAR_WOL_FILER_UCAST) {
35277d993c5fSArseny Solokha 		/* need to stop rx only, tx is already down */
35287d993c5fSArseny Solokha 		gfar_halt(priv);
35297d993c5fSArseny Solokha 		gfar_filer_restore_table(priv);
35307d993c5fSArseny Solokha 
35317d993c5fSArseny Solokha 	} else {
35327d993c5fSArseny Solokha 		phy_start(ndev->phydev);
35337d993c5fSArseny Solokha 	}
35347d993c5fSArseny Solokha 
35357d993c5fSArseny Solokha 	gfar_start(priv);
35367d993c5fSArseny Solokha 
35377d993c5fSArseny Solokha 	netif_device_attach(ndev);
35387d993c5fSArseny Solokha 	enable_napi(priv);
35397d993c5fSArseny Solokha 
35407d993c5fSArseny Solokha 	return 0;
35417d993c5fSArseny Solokha }
35427d993c5fSArseny Solokha 
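/* Hibernation restore: hardware state was lost, so rebuild the BD rings,
 * reset the MAC and restart DMA before re-attaching the interface.
 */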
35437d993c5fSArseny Solokha static int gfar_restore(struct device *dev)
35447d993c5fSArseny Solokha {
35457d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
35467d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
35477d993c5fSArseny Solokha 
35487d993c5fSArseny Solokha 	if (!netif_running(ndev)) {
35497d993c5fSArseny Solokha 		netif_device_attach(ndev);
35507d993c5fSArseny Solokha 
35517d993c5fSArseny Solokha 		return 0;
35527d993c5fSArseny Solokha 	}
35537d993c5fSArseny Solokha 
35547d993c5fSArseny Solokha 	gfar_init_bds(ndev);
35557d993c5fSArseny Solokha 
35567d993c5fSArseny Solokha 	gfar_mac_reset(priv);
35577d993c5fSArseny Solokha 
35587d993c5fSArseny Solokha 	gfar_init_tx_rx_base(priv);
35597d993c5fSArseny Solokha 
35607d993c5fSArseny Solokha 	gfar_start(priv);
35617d993c5fSArseny Solokha 
35626ce29b0eSClaudiu Manoil 	priv->oldlink = 0;
35636ce29b0eSClaudiu Manoil 	priv->oldspeed = 0;
35646ce29b0eSClaudiu Manoil 	priv->oldduplex = -1;
35657d993c5fSArseny Solokha 
35667d993c5fSArseny Solokha 	if (ndev->phydev)
35677d993c5fSArseny Solokha 		phy_start(ndev->phydev);
35687d993c5fSArseny Solokha 
35697d993c5fSArseny Solokha 	netif_device_attach(ndev);
35707d993c5fSArseny Solokha 	enable_napi(priv);
35717d993c5fSArseny Solokha 
35727d993c5fSArseny Solokha 	return 0;
35736ce29b0eSClaudiu Manoil }
35746ce29b0eSClaudiu Manoil 
35757d993c5fSArseny Solokha static const struct dev_pm_ops gfar_pm_ops = {
35767d993c5fSArseny Solokha 	.suspend = gfar_suspend,
35777d993c5fSArseny Solokha 	.resume = gfar_resume,
35787d993c5fSArseny Solokha 	.freeze = gfar_suspend,
35797d993c5fSArseny Solokha 	.thaw = gfar_resume,
35807d993c5fSArseny Solokha 	.restore = gfar_restore,
35817d993c5fSArseny Solokha };
35827d993c5fSArseny Solokha 
35837d993c5fSArseny Solokha #define GFAR_PM_OPS (&gfar_pm_ops)
35847d993c5fSArseny Solokha 
35857d993c5fSArseny Solokha #else
35867d993c5fSArseny Solokha 
35877d993c5fSArseny Solokha #define GFAR_PM_OPS NULL
35887d993c5fSArseny Solokha 
35897d993c5fSArseny Solokha #endif
35906ce29b0eSClaudiu Manoil 
359194e5a2a8SFabian Frederick static const struct of_device_id gfar_match[] =
3592ec21e2ecSJeff Kirsher {
3593ec21e2ecSJeff Kirsher 	{
3594ec21e2ecSJeff Kirsher 		.type = "network",
3595ec21e2ecSJeff Kirsher 		.compatible = "gianfar",
3596ec21e2ecSJeff Kirsher 	},
3597ec21e2ecSJeff Kirsher 	{
3598ec21e2ecSJeff Kirsher 		.compatible = "fsl,etsec2",
3599ec21e2ecSJeff Kirsher 	},
3600ec21e2ecSJeff Kirsher 	{},
3601ec21e2ecSJeff Kirsher };
3602ec21e2ecSJeff Kirsher MODULE_DEVICE_TABLE(of, gfar_match);
3603ec21e2ecSJeff Kirsher 
3604ec21e2ecSJeff Kirsher /* Structure for a device driver */
3605ec21e2ecSJeff Kirsher static struct platform_driver gfar_driver = {
3606ec21e2ecSJeff Kirsher 	.driver = {
3607ec21e2ecSJeff Kirsher 		.name = "fsl-gianfar",
3608ec21e2ecSJeff Kirsher 		.pm = GFAR_PM_OPS,
3609ec21e2ecSJeff Kirsher 		.of_match_table = gfar_match,
3610ec21e2ecSJeff Kirsher 	},
3611ec21e2ecSJeff Kirsher 	.probe = gfar_probe,
3612ec21e2ecSJeff Kirsher 	.remove = gfar_remove,
3613ec21e2ecSJeff Kirsher };
3614ec21e2ecSJeff Kirsher 
3615db62f684SAxel Lin module_platform_driver(gfar_driver);
3616