xref: /openbmc/linux/drivers/net/ethernet/freescale/gianfar.c (revision d8861bab48b6c1fc3cdbcab8ff9d1eaea43afe7f)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
20977f817SJan Ceuleers /* drivers/net/ethernet/freescale/gianfar.c
3ec21e2ecSJeff Kirsher  *
4ec21e2ecSJeff Kirsher  * Gianfar Ethernet Driver
5ec21e2ecSJeff Kirsher  * This driver is designed for the non-CPM Ethernet controllers
6ec21e2ecSJeff Kirsher  * on the 85xx and 83xx families of integrated processors
7ec21e2ecSJeff Kirsher  * Based on 8260_io/fcc_enet.c
8ec21e2ecSJeff Kirsher  *
9ec21e2ecSJeff Kirsher  * Author: Andy Fleming
10ec21e2ecSJeff Kirsher  * Maintainer: Kumar Gala
11ec21e2ecSJeff Kirsher  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12ec21e2ecSJeff Kirsher  *
1320862788SClaudiu Manoil  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
14ec21e2ecSJeff Kirsher  * Copyright 2007 MontaVista Software, Inc.
15ec21e2ecSJeff Kirsher  *
16ec21e2ecSJeff Kirsher  *  Gianfar:  AKA Lambda Draconis, "Dragon"
17ec21e2ecSJeff Kirsher  *  RA 11 31 24.2
18ec21e2ecSJeff Kirsher  *  Dec +69 19 52
19ec21e2ecSJeff Kirsher  *  V 3.84
20ec21e2ecSJeff Kirsher  *  B-V +1.62
21ec21e2ecSJeff Kirsher  *
22ec21e2ecSJeff Kirsher  *  Theory of operation
23ec21e2ecSJeff Kirsher  *
24ec21e2ecSJeff Kirsher  *  The driver is initialized through of_device. Configuration information
25ec21e2ecSJeff Kirsher  *  is therefore conveyed through an OF-style device tree.
26ec21e2ecSJeff Kirsher  *
27ec21e2ecSJeff Kirsher  *  The Gianfar Ethernet Controller uses a ring of buffer
28ec21e2ecSJeff Kirsher  *  descriptors.  The beginning is indicated by a register
29ec21e2ecSJeff Kirsher  *  pointing to the physical address of the start of the ring.
30ec21e2ecSJeff Kirsher  *  The end is determined by a "wrap" bit being set in the
31ec21e2ecSJeff Kirsher  *  last descriptor of the ring.
32ec21e2ecSJeff Kirsher  *
33ec21e2ecSJeff Kirsher  *  When a packet is received, the RXF bit in the
34ec21e2ecSJeff Kirsher  *  IEVENT register is set, triggering an interrupt when the
35ec21e2ecSJeff Kirsher  *  corresponding bit in the IMASK register is also set (if
36ec21e2ecSJeff Kirsher  *  interrupt coalescing is active, then the interrupt may not
37ec21e2ecSJeff Kirsher  *  happen immediately, but will wait until either a set number
38ec21e2ecSJeff Kirsher  * of frames or a set amount of time has passed).  In NAPI, the
39ec21e2ecSJeff Kirsher  *  interrupt handler will signal there is work to be done, and
40ec21e2ecSJeff Kirsher  *  exit. This method will start at the last known empty
41ec21e2ecSJeff Kirsher  *  descriptor, and process every subsequent descriptor until there
42ec21e2ecSJeff Kirsher  *  are none left with data (NAPI will stop after a set number of
43ec21e2ecSJeff Kirsher  *  packets to give time to other tasks, but will eventually
44ec21e2ecSJeff Kirsher  *  process all the packets).  The data arrives inside a
45ec21e2ecSJeff Kirsher  *  pre-allocated skb, and so after the skb is passed up to the
46ec21e2ecSJeff Kirsher  *  stack, a new skb must be allocated, and the address field in
47ec21e2ecSJeff Kirsher  *  the buffer descriptor must be updated to indicate this new
48ec21e2ecSJeff Kirsher  *  skb.
49ec21e2ecSJeff Kirsher  *
50ec21e2ecSJeff Kirsher  *  When the kernel requests that a packet be transmitted, the
51ec21e2ecSJeff Kirsher  *  driver starts where it left off last time, and points the
52ec21e2ecSJeff Kirsher  *  descriptor at the buffer which was passed in.  The driver
53ec21e2ecSJeff Kirsher  *  then informs the DMA engine that there are packets ready to
54ec21e2ecSJeff Kirsher  *  be transmitted.  Once the controller is finished transmitting
55ec21e2ecSJeff Kirsher  *  the packet, an interrupt may be triggered (under the same
56ec21e2ecSJeff Kirsher  *  conditions as for reception, but depending on the TXF bit).
57ec21e2ecSJeff Kirsher  *  The driver then cleans up the buffer.
58ec21e2ecSJeff Kirsher  */
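
#if 0	/* Illustrative sketch, not part of the driver: one way the Rx half
	 * of the descriptor ring described above might be walked.  rxbd8,
	 * BD_LFLAG(), RXBD_EMPTY and RXBD_WRAP are defined in gianfar.h;
	 * process_frame() is a hypothetical consumer.
	 */
static void example_walk_rx_ring(struct rxbd8 *bd_base)
{
	struct rxbd8 *bdp = bd_base;

	/* Stop at the first descriptor the hardware still owns (EMPTY set);
	 * the WRAP bit in the last descriptor sends us back to the base.
	 */
	while (!(be32_to_cpu(bdp->lstatus) & BD_LFLAG(RXBD_EMPTY))) {
		process_frame(bdp);	/* hypothetical consumer */

		if (be32_to_cpu(bdp->lstatus) & BD_LFLAG(RXBD_WRAP))
			bdp = bd_base;
		else
			bdp++;
	}
}
#endif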
59ec21e2ecSJeff Kirsher 
60ec21e2ecSJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61ec21e2ecSJeff Kirsher 
62ec21e2ecSJeff Kirsher #include <linux/kernel.h>
63ec21e2ecSJeff Kirsher #include <linux/string.h>
64ec21e2ecSJeff Kirsher #include <linux/errno.h>
65ec21e2ecSJeff Kirsher #include <linux/unistd.h>
66ec21e2ecSJeff Kirsher #include <linux/slab.h>
67ec21e2ecSJeff Kirsher #include <linux/interrupt.h>
68ec21e2ecSJeff Kirsher #include <linux/delay.h>
69ec21e2ecSJeff Kirsher #include <linux/netdevice.h>
70ec21e2ecSJeff Kirsher #include <linux/etherdevice.h>
71ec21e2ecSJeff Kirsher #include <linux/skbuff.h>
72ec21e2ecSJeff Kirsher #include <linux/if_vlan.h>
73ec21e2ecSJeff Kirsher #include <linux/spinlock.h>
74ec21e2ecSJeff Kirsher #include <linux/mm.h>
755af50730SRob Herring #include <linux/of_address.h>
765af50730SRob Herring #include <linux/of_irq.h>
77ec21e2ecSJeff Kirsher #include <linux/of_mdio.h>
78ec21e2ecSJeff Kirsher #include <linux/of_platform.h>
79ec21e2ecSJeff Kirsher #include <linux/ip.h>
80ec21e2ecSJeff Kirsher #include <linux/tcp.h>
81ec21e2ecSJeff Kirsher #include <linux/udp.h>
82ec21e2ecSJeff Kirsher #include <linux/in.h>
83ec21e2ecSJeff Kirsher #include <linux/net_tstamp.h>
84ec21e2ecSJeff Kirsher 
85ec21e2ecSJeff Kirsher #include <asm/io.h>
86d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
87ec21e2ecSJeff Kirsher #include <asm/reg.h>
882969b1f7SClaudiu Manoil #include <asm/mpc85xx.h>
89d6ef0bccSClaudiu Manoil #endif
90ec21e2ecSJeff Kirsher #include <asm/irq.h>
917c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
92ec21e2ecSJeff Kirsher #include <linux/module.h>
93ec21e2ecSJeff Kirsher #include <linux/dma-mapping.h>
94ec21e2ecSJeff Kirsher #include <linux/crc32.h>
95ec21e2ecSJeff Kirsher #include <linux/mii.h>
96ec21e2ecSJeff Kirsher #include <linux/phy.h>
97ec21e2ecSJeff Kirsher #include <linux/phy_fixed.h>
98ec21e2ecSJeff Kirsher #include <linux/of.h>
99ec21e2ecSJeff Kirsher #include <linux/of_net.h>
100ec21e2ecSJeff Kirsher 
101ec21e2ecSJeff Kirsher #include "gianfar.h"
102ec21e2ecSJeff Kirsher 
1038fcc6033SAbhimanyu #define TX_TIMEOUT      (5*HZ)
104ec21e2ecSJeff Kirsher 
105ec21e2ecSJeff Kirsher MODULE_AUTHOR("Freescale Semiconductor, Inc");
106ec21e2ecSJeff Kirsher MODULE_DESCRIPTION("Gianfar Ethernet Driver");
107ec21e2ecSJeff Kirsher MODULE_LICENSE("GPL");
108ec21e2ecSJeff Kirsher 
109ec21e2ecSJeff Kirsher static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
110ec21e2ecSJeff Kirsher 			    dma_addr_t buf)
111ec21e2ecSJeff Kirsher {
112ec21e2ecSJeff Kirsher 	u32 lstatus;
113ec21e2ecSJeff Kirsher 
114a7312d58SClaudiu Manoil 	bdp->bufPtr = cpu_to_be32(buf);
115ec21e2ecSJeff Kirsher 
116ec21e2ecSJeff Kirsher 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
117ec21e2ecSJeff Kirsher 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
118ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(RXBD_WRAP);
119ec21e2ecSJeff Kirsher 
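	/* Ensure the buffer pointer update above is visible to the device
	 * before the lstatus write below hands the descriptor back to it.
	 */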
120d55398baSClaudiu Manoil 	gfar_wmb();
121ec21e2ecSJeff Kirsher 
122a7312d58SClaudiu Manoil 	bdp->lstatus = cpu_to_be32(lstatus);
123ec21e2ecSJeff Kirsher }
124ec21e2ecSJeff Kirsher 
125ec21e2ecSJeff Kirsher static void gfar_init_tx_rx_base(struct gfar_private *priv)
126ec21e2ecSJeff Kirsher {
127ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
128ec21e2ecSJeff Kirsher 	u32 __iomem *baddr;
129ec21e2ecSJeff Kirsher 	int i;
130ec21e2ecSJeff Kirsher 
131ec21e2ecSJeff Kirsher 	baddr = &regs->tbase0;
132ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
133ec21e2ecSJeff Kirsher 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
134ec21e2ecSJeff Kirsher 		baddr += 2;
135ec21e2ecSJeff Kirsher 	}
136ec21e2ecSJeff Kirsher 
137ec21e2ecSJeff Kirsher 	baddr = &regs->rbase0;
138ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
139ec21e2ecSJeff Kirsher 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
140ec21e2ecSJeff Kirsher 		baddr += 2;
141ec21e2ecSJeff Kirsher 	}
142ec21e2ecSJeff Kirsher }
143ec21e2ecSJeff Kirsher 
14445b679c9SMatei Pavaluca static void gfar_init_rqprm(struct gfar_private *priv)
14545b679c9SMatei Pavaluca {
14645b679c9SMatei Pavaluca 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
14745b679c9SMatei Pavaluca 	u32 __iomem *baddr;
14845b679c9SMatei Pavaluca 	int i;
14945b679c9SMatei Pavaluca 
15045b679c9SMatei Pavaluca 	baddr = &regs->rqprm0;
15145b679c9SMatei Pavaluca 	for (i = 0; i < priv->num_rx_queues; i++) {
15245b679c9SMatei Pavaluca 		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
15345b679c9SMatei Pavaluca 			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
15445b679c9SMatei Pavaluca 		baddr++;
15545b679c9SMatei Pavaluca 	}
15645b679c9SMatei Pavaluca }
15745b679c9SMatei Pavaluca 
15875354148SClaudiu Manoil static void gfar_rx_offload_en(struct gfar_private *priv)
15988302648SClaudiu Manoil {
16088302648SClaudiu Manoil 	/* set this when rx hw offload (TOE) functions are being used */
16188302648SClaudiu Manoil 	priv->uses_rxfcb = 0;
16288302648SClaudiu Manoil 
16388302648SClaudiu Manoil 	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
16488302648SClaudiu Manoil 		priv->uses_rxfcb = 1;
16588302648SClaudiu Manoil 
16615bf176dSClaudiu Manoil 	if (priv->hwts_rx_en || priv->rx_filer_enable)
16788302648SClaudiu Manoil 		priv->uses_rxfcb = 1;
16888302648SClaudiu Manoil }
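
/* (uses_rxfcb set above means received frames arrive with a frame control
 * block (FCB) prepended, which the Rx path strips before handing the skb
 * to the stack; see the Rx processing code elsewhere in this file.)
 */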
16988302648SClaudiu Manoil 
170a328ac92SClaudiu Manoil static void gfar_mac_rx_config(struct gfar_private *priv)
171ec21e2ecSJeff Kirsher {
172ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
173ec21e2ecSJeff Kirsher 	u32 rctrl = 0;
174ec21e2ecSJeff Kirsher 
175ec21e2ecSJeff Kirsher 	if (priv->rx_filer_enable) {
17615bf176dSClaudiu Manoil 		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
177ec21e2ecSJeff Kirsher 		/* Program the RIR0 reg with the required distribution */
17871ff9e3dSClaudiu Manoil 		if (priv->poll_mode == GFAR_SQ_POLLING)
17971ff9e3dSClaudiu Manoil 			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
18071ff9e3dSClaudiu Manoil 		else /* GFAR_MQ_POLLING */
18171ff9e3dSClaudiu Manoil 			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
182ec21e2ecSJeff Kirsher 	}
183ec21e2ecSJeff Kirsher 
184f5ae6279SClaudiu Manoil 	/* Restore PROMISC mode */
185a328ac92SClaudiu Manoil 	if (priv->ndev->flags & IFF_PROMISC)
186f5ae6279SClaudiu Manoil 		rctrl |= RCTRL_PROM;
187f5ae6279SClaudiu Manoil 
18888302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_RXCSUM)
189ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_CHECKSUMMING;
190ec21e2ecSJeff Kirsher 
19188302648SClaudiu Manoil 	if (priv->extended_hash)
19288302648SClaudiu Manoil 		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
193ec21e2ecSJeff Kirsher 
194ec21e2ecSJeff Kirsher 	if (priv->padding) {
195ec21e2ecSJeff Kirsher 		rctrl &= ~RCTRL_PAL_MASK;
196ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_PADDING(priv->padding);
197ec21e2ecSJeff Kirsher 	}
198ec21e2ecSJeff Kirsher 
199ec21e2ecSJeff Kirsher 	/* Enable HW time stamping if requested from user space */
20088302648SClaudiu Manoil 	if (priv->hwts_rx_en)
201ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
202ec21e2ecSJeff Kirsher 
20388302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
204ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
205ec21e2ecSJeff Kirsher 
20645b679c9SMatei Pavaluca 	/* Clear the LFC bit */
20745b679c9SMatei Pavaluca 	gfar_write(&regs->rctrl, rctrl);
20845b679c9SMatei Pavaluca 	/* Init flow control threshold values */
20945b679c9SMatei Pavaluca 	gfar_init_rqprm(priv);
21045b679c9SMatei Pavaluca 	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
21145b679c9SMatei Pavaluca 	rctrl |= RCTRL_LFC;
21245b679c9SMatei Pavaluca 
213ec21e2ecSJeff Kirsher 	/* Init rctrl based on our settings */
214ec21e2ecSJeff Kirsher 	gfar_write(&regs->rctrl, rctrl);
215a328ac92SClaudiu Manoil }
216ec21e2ecSJeff Kirsher 
217a328ac92SClaudiu Manoil static void gfar_mac_tx_config(struct gfar_private *priv)
218a328ac92SClaudiu Manoil {
219a328ac92SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
220a328ac92SClaudiu Manoil 	u32 tctrl = 0;
221a328ac92SClaudiu Manoil 
222a328ac92SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_IP_CSUM)
223ec21e2ecSJeff Kirsher 		tctrl |= TCTRL_INIT_CSUM;
224ec21e2ecSJeff Kirsher 
225b98b8babSClaudiu Manoil 	if (priv->prio_sched_en)
226ec21e2ecSJeff Kirsher 		tctrl |= TCTRL_TXSCHED_PRIO;
227b98b8babSClaudiu Manoil 	else {
228b98b8babSClaudiu Manoil 		tctrl |= TCTRL_TXSCHED_WRRS;
229b98b8babSClaudiu Manoil 		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
230b98b8babSClaudiu Manoil 		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
231b98b8babSClaudiu Manoil 	}
232ec21e2ecSJeff Kirsher 
23388302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
23488302648SClaudiu Manoil 		tctrl |= TCTRL_VLINS;
23588302648SClaudiu Manoil 
236ec21e2ecSJeff Kirsher 	gfar_write(&regs->tctrl, tctrl);
237ec21e2ecSJeff Kirsher }
238ec21e2ecSJeff Kirsher 
239f19015baSClaudiu Manoil static void gfar_configure_coalescing(struct gfar_private *priv,
240f19015baSClaudiu Manoil 			       unsigned long tx_mask, unsigned long rx_mask)
241f19015baSClaudiu Manoil {
242f19015baSClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
243f19015baSClaudiu Manoil 	u32 __iomem *baddr;
244f19015baSClaudiu Manoil 
245f19015baSClaudiu Manoil 	if (priv->mode == MQ_MG_MODE) {
246f19015baSClaudiu Manoil 		int i = 0;
247f19015baSClaudiu Manoil 
248f19015baSClaudiu Manoil 		baddr = &regs->txic0;
249f19015baSClaudiu Manoil 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
250f19015baSClaudiu Manoil 			gfar_write(baddr + i, 0);
251f19015baSClaudiu Manoil 			if (likely(priv->tx_queue[i]->txcoalescing))
252f19015baSClaudiu Manoil 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
253f19015baSClaudiu Manoil 		}
254f19015baSClaudiu Manoil 
255f19015baSClaudiu Manoil 		baddr = &regs->rxic0;
256f19015baSClaudiu Manoil 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
257f19015baSClaudiu Manoil 			gfar_write(baddr + i, 0);
258f19015baSClaudiu Manoil 			if (likely(priv->rx_queue[i]->rxcoalescing))
259f19015baSClaudiu Manoil 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
260f19015baSClaudiu Manoil 		}
261f19015baSClaudiu Manoil 	} else {
262f19015baSClaudiu Manoil 		/* Backward compatible case -- even if we enable
263f19015baSClaudiu Manoil 		 * multiple queues, there's only a single reg to program
264f19015baSClaudiu Manoil 		 */
265f19015baSClaudiu Manoil 		gfar_write(&regs->txic, 0);
266f19015baSClaudiu Manoil 		if (likely(priv->tx_queue[0]->txcoalescing))
267f19015baSClaudiu Manoil 			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
268f19015baSClaudiu Manoil 
269f19015baSClaudiu Manoil 		gfar_write(&regs->rxic, 0);
270f19015baSClaudiu Manoil 		if (unlikely(priv->rx_queue[0]->rxcoalescing))
271f19015baSClaudiu Manoil 			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
272f19015baSClaudiu Manoil 	}
273f19015baSClaudiu Manoil }
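
/* Usage note: in MQ_MG_MODE the tx_mask/rx_mask arguments select which
 * queues to (re)program, one bit per queue; e.g. a tx_mask of BIT(0)
 * with an rx_mask of 0 would rewrite only tx queue 0's coalescing
 * registers.  The single-group fallback above ignores the masks.
 */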
274f19015baSClaudiu Manoil 
2757ad38784SArseny Solokha static void gfar_configure_coalescing_all(struct gfar_private *priv)
276f19015baSClaudiu Manoil {
277f19015baSClaudiu Manoil 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
278f19015baSClaudiu Manoil }
279f19015baSClaudiu Manoil 
280ec21e2ecSJeff Kirsher static struct net_device_stats *gfar_get_stats(struct net_device *dev)
281ec21e2ecSJeff Kirsher {
282ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
283ec21e2ecSJeff Kirsher 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
284ec21e2ecSJeff Kirsher 	unsigned long tx_packets = 0, tx_bytes = 0;
2853a2e16c8SJan Ceuleers 	int i;
286ec21e2ecSJeff Kirsher 
287ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
288ec21e2ecSJeff Kirsher 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
289ec21e2ecSJeff Kirsher 		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
290ec21e2ecSJeff Kirsher 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
291ec21e2ecSJeff Kirsher 	}
292ec21e2ecSJeff Kirsher 
293ec21e2ecSJeff Kirsher 	dev->stats.rx_packets = rx_packets;
294ec21e2ecSJeff Kirsher 	dev->stats.rx_bytes   = rx_bytes;
295ec21e2ecSJeff Kirsher 	dev->stats.rx_dropped = rx_dropped;
296ec21e2ecSJeff Kirsher 
297ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
298ec21e2ecSJeff Kirsher 		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
299ec21e2ecSJeff Kirsher 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
300ec21e2ecSJeff Kirsher 	}
301ec21e2ecSJeff Kirsher 
302ec21e2ecSJeff Kirsher 	dev->stats.tx_bytes   = tx_bytes;
303ec21e2ecSJeff Kirsher 	dev->stats.tx_packets = tx_packets;
304ec21e2ecSJeff Kirsher 
305ec21e2ecSJeff Kirsher 	return &dev->stats;
306ec21e2ecSJeff Kirsher }
307ec21e2ecSJeff Kirsher 
3087d993c5fSArseny Solokha /* Set the appropriate hash bit for the given addr */
3097d993c5fSArseny Solokha /* The algorithm works like so:
3107d993c5fSArseny Solokha  * 1) Take the Destination Address (ie the multicast address), and
3117d993c5fSArseny Solokha  * do a CRC on it (little endian), and reverse the bits of the
3127d993c5fSArseny Solokha  * result.
3137d993c5fSArseny Solokha  * 2) Use the 8 most significant bits as a hash into a 256-entry
3147d993c5fSArseny Solokha  * table.  The table is controlled through 8 32-bit registers:
3157d993c5fSArseny Solokha  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3167d993c5fSArseny Solokha  * gaddr7.  This means that the 3 most significant bits in the
3177d993c5fSArseny Solokha  * entry 255.  This means that the 3 most significant bits of the
3187d993c5fSArseny Solokha  * hash index select which gaddr register to use, and the other 5 bits
3197d993c5fSArseny Solokha  * for PowerPC (tm) is usually the case) in the register holds
3207d993c5fSArseny Solokha  * the entry.
3217d993c5fSArseny Solokha  */
3227d993c5fSArseny Solokha static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3237d993c5fSArseny Solokha {
3247d993c5fSArseny Solokha 	u32 tempval;
3257d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
3267d993c5fSArseny Solokha 	u32 result = ether_crc(ETH_ALEN, addr);
3277d993c5fSArseny Solokha 	int width = priv->hash_width;
3287d993c5fSArseny Solokha 	u8 whichbit = (result >> (32 - width)) & 0x1f;
3297d993c5fSArseny Solokha 	u8 whichreg = result >> (32 - width + 5);
3307d993c5fSArseny Solokha 	u32 value = (1 << (31-whichbit));
3317d993c5fSArseny Solokha 
3327d993c5fSArseny Solokha 	tempval = gfar_read(priv->hash_regs[whichreg]);
3337d993c5fSArseny Solokha 	tempval |= value;
3347d993c5fSArseny Solokha 	gfar_write(priv->hash_regs[whichreg], tempval);
3357d993c5fSArseny Solokha }
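
/* Worked example (illustrative): with hash_width == 8 (no extended hash),
 * a CRC result whose top byte is 0xd2 gives whichreg = 0xd2 >> 5 = 6 and
 * whichbit = 0xd2 & 0x1f = 18, so the mask 1 << (31 - 18) is OR-ed into
 * gaddr6.
 */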
3367d993c5fSArseny Solokha 
3377d993c5fSArseny Solokha /* There are multiple MAC address register pairs on some controllers.
3387d993c5fSArseny Solokha  * This function sets the num'th pair to a given address.
3397d993c5fSArseny Solokha  */
3407d993c5fSArseny Solokha static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3417d993c5fSArseny Solokha 				  const u8 *addr)
3427d993c5fSArseny Solokha {
3437d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
3447d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3457d993c5fSArseny Solokha 	u32 tempval;
3467d993c5fSArseny Solokha 	u32 __iomem *macptr = &regs->macstnaddr1;
3477d993c5fSArseny Solokha 
3487d993c5fSArseny Solokha 	macptr += num*2;
3497d993c5fSArseny Solokha 
3507d993c5fSArseny Solokha 	/* For a station address of 0x12345678ABCD in transmission
3517d993c5fSArseny Solokha 	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3527d993c5fSArseny Solokha 	 * MACnADDR2 is set to 0x34120000.
3537d993c5fSArseny Solokha 	 */
3547d993c5fSArseny Solokha 	tempval = (addr[5] << 24) | (addr[4] << 16) |
3557d993c5fSArseny Solokha 		  (addr[3] << 8)  |  addr[2];
3567d993c5fSArseny Solokha 
3577d993c5fSArseny Solokha 	gfar_write(macptr, tempval);
3587d993c5fSArseny Solokha 
3597d993c5fSArseny Solokha 	tempval = (addr[1] << 24) | (addr[0] << 16);
3607d993c5fSArseny Solokha 
3617d993c5fSArseny Solokha 	gfar_write(macptr+1, tempval);
3627d993c5fSArseny Solokha }
3637d993c5fSArseny Solokha 
3643d23a05cSClaudiu Manoil static int gfar_set_mac_addr(struct net_device *dev, void *p)
3653d23a05cSClaudiu Manoil {
3663d23a05cSClaudiu Manoil 	eth_mac_addr(dev, p);
3673d23a05cSClaudiu Manoil 
3683d23a05cSClaudiu Manoil 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
3693d23a05cSClaudiu Manoil 
3703d23a05cSClaudiu Manoil 	return 0;
3713d23a05cSClaudiu Manoil }
3723d23a05cSClaudiu Manoil 
373efeddce7SClaudiu Manoil static void gfar_ints_disable(struct gfar_private *priv)
374efeddce7SClaudiu Manoil {
375efeddce7SClaudiu Manoil 	int i;
376efeddce7SClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
377efeddce7SClaudiu Manoil 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
378efeddce7SClaudiu Manoil 		/* Clear IEVENT */
379efeddce7SClaudiu Manoil 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
380efeddce7SClaudiu Manoil 
381efeddce7SClaudiu Manoil 		/* Initialize IMASK */
382efeddce7SClaudiu Manoil 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
383efeddce7SClaudiu Manoil 	}
384efeddce7SClaudiu Manoil }
385efeddce7SClaudiu Manoil 
386efeddce7SClaudiu Manoil static void gfar_ints_enable(struct gfar_private *priv)
387efeddce7SClaudiu Manoil {
388efeddce7SClaudiu Manoil 	int i;
389efeddce7SClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
390efeddce7SClaudiu Manoil 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
391efeddce7SClaudiu Manoil 		/* Unmask the interrupts we look for */
392efeddce7SClaudiu Manoil 		gfar_write(&regs->imask, IMASK_DEFAULT);
393efeddce7SClaudiu Manoil 	}
394efeddce7SClaudiu Manoil }
395efeddce7SClaudiu Manoil 
39620862788SClaudiu Manoil static int gfar_alloc_tx_queues(struct gfar_private *priv)
39720862788SClaudiu Manoil {
39820862788SClaudiu Manoil 	int i;
39920862788SClaudiu Manoil 
40020862788SClaudiu Manoil 	for (i = 0; i < priv->num_tx_queues; i++) {
40120862788SClaudiu Manoil 		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
40220862788SClaudiu Manoil 					    GFP_KERNEL);
40320862788SClaudiu Manoil 		if (!priv->tx_queue[i])
40420862788SClaudiu Manoil 			return -ENOMEM;
40520862788SClaudiu Manoil 
40620862788SClaudiu Manoil 		priv->tx_queue[i]->tx_skbuff = NULL;
40720862788SClaudiu Manoil 		priv->tx_queue[i]->qindex = i;
40820862788SClaudiu Manoil 		priv->tx_queue[i]->dev = priv->ndev;
40920862788SClaudiu Manoil 		spin_lock_init(&(priv->tx_queue[i]->txlock));
41020862788SClaudiu Manoil 	}
41120862788SClaudiu Manoil 	return 0;
41220862788SClaudiu Manoil }
41320862788SClaudiu Manoil 
41420862788SClaudiu Manoil static int gfar_alloc_rx_queues(struct gfar_private *priv)
41520862788SClaudiu Manoil {
41620862788SClaudiu Manoil 	int i;
41720862788SClaudiu Manoil 
41820862788SClaudiu Manoil 	for (i = 0; i < priv->num_rx_queues; i++) {
41920862788SClaudiu Manoil 		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
42020862788SClaudiu Manoil 					    GFP_KERNEL);
42120862788SClaudiu Manoil 		if (!priv->rx_queue[i])
42220862788SClaudiu Manoil 			return -ENOMEM;
42320862788SClaudiu Manoil 
42420862788SClaudiu Manoil 		priv->rx_queue[i]->qindex = i;
425f23223f1SClaudiu Manoil 		priv->rx_queue[i]->ndev = priv->ndev;
42620862788SClaudiu Manoil 	}
42720862788SClaudiu Manoil 	return 0;
42820862788SClaudiu Manoil }
42920862788SClaudiu Manoil 
43020862788SClaudiu Manoil static void gfar_free_tx_queues(struct gfar_private *priv)
431ec21e2ecSJeff Kirsher {
4323a2e16c8SJan Ceuleers 	int i;
433ec21e2ecSJeff Kirsher 
434ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++)
435ec21e2ecSJeff Kirsher 		kfree(priv->tx_queue[i]);
436ec21e2ecSJeff Kirsher }
437ec21e2ecSJeff Kirsher 
43820862788SClaudiu Manoil static void gfar_free_rx_queues(struct gfar_private *priv)
439ec21e2ecSJeff Kirsher {
4403a2e16c8SJan Ceuleers 	int i;
441ec21e2ecSJeff Kirsher 
442ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++)
443ec21e2ecSJeff Kirsher 		kfree(priv->rx_queue[i]);
444ec21e2ecSJeff Kirsher }
445ec21e2ecSJeff Kirsher 
446ec21e2ecSJeff Kirsher static void unmap_group_regs(struct gfar_private *priv)
447ec21e2ecSJeff Kirsher {
4483a2e16c8SJan Ceuleers 	int i;
449ec21e2ecSJeff Kirsher 
450ec21e2ecSJeff Kirsher 	for (i = 0; i < MAXGROUPS; i++)
451ec21e2ecSJeff Kirsher 		if (priv->gfargrp[i].regs)
452ec21e2ecSJeff Kirsher 			iounmap(priv->gfargrp[i].regs);
453ec21e2ecSJeff Kirsher }
454ec21e2ecSJeff Kirsher 
455ee873fdaSClaudiu Manoil static void free_gfar_dev(struct gfar_private *priv)
456ee873fdaSClaudiu Manoil {
457ee873fdaSClaudiu Manoil 	int i, j;
458ee873fdaSClaudiu Manoil 
459ee873fdaSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++)
460ee873fdaSClaudiu Manoil 		for (j = 0; j < GFAR_NUM_IRQS; j++) {
461ee873fdaSClaudiu Manoil 			kfree(priv->gfargrp[i].irqinfo[j]);
462ee873fdaSClaudiu Manoil 			priv->gfargrp[i].irqinfo[j] = NULL;
463ee873fdaSClaudiu Manoil 		}
464ee873fdaSClaudiu Manoil 
465ee873fdaSClaudiu Manoil 	free_netdev(priv->ndev);
466ee873fdaSClaudiu Manoil }
467ee873fdaSClaudiu Manoil 
468ec21e2ecSJeff Kirsher static void disable_napi(struct gfar_private *priv)
469ec21e2ecSJeff Kirsher {
4703a2e16c8SJan Ceuleers 	int i;
471ec21e2ecSJeff Kirsher 
472aeb12c5eSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
473aeb12c5eSClaudiu Manoil 		napi_disable(&priv->gfargrp[i].napi_rx);
474aeb12c5eSClaudiu Manoil 		napi_disable(&priv->gfargrp[i].napi_tx);
475aeb12c5eSClaudiu Manoil 	}
476ec21e2ecSJeff Kirsher }
477ec21e2ecSJeff Kirsher 
478ec21e2ecSJeff Kirsher static void enable_napi(struct gfar_private *priv)
479ec21e2ecSJeff Kirsher {
4803a2e16c8SJan Ceuleers 	int i;
481ec21e2ecSJeff Kirsher 
482aeb12c5eSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
483aeb12c5eSClaudiu Manoil 		napi_enable(&priv->gfargrp[i].napi_rx);
484aeb12c5eSClaudiu Manoil 		napi_enable(&priv->gfargrp[i].napi_tx);
485aeb12c5eSClaudiu Manoil 	}
486ec21e2ecSJeff Kirsher }
487ec21e2ecSJeff Kirsher 
488ec21e2ecSJeff Kirsher static int gfar_parse_group(struct device_node *np,
489ec21e2ecSJeff Kirsher 			    struct gfar_private *priv, const char *model)
490ec21e2ecSJeff Kirsher {
4915fedcc14SClaudiu Manoil 	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
492ee873fdaSClaudiu Manoil 	int i;
493ee873fdaSClaudiu Manoil 
494ee873fdaSClaudiu Manoil 	for (i = 0; i < GFAR_NUM_IRQS; i++) {
495ee873fdaSClaudiu Manoil 		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
496ee873fdaSClaudiu Manoil 					  GFP_KERNEL);
497ee873fdaSClaudiu Manoil 		if (!grp->irqinfo[i])
498ee873fdaSClaudiu Manoil 			return -ENOMEM;
499ee873fdaSClaudiu Manoil 	}
500ec21e2ecSJeff Kirsher 
5015fedcc14SClaudiu Manoil 	grp->regs = of_iomap(np, 0);
5025fedcc14SClaudiu Manoil 	if (!grp->regs)
503ec21e2ecSJeff Kirsher 		return -ENOMEM;
504ec21e2ecSJeff Kirsher 
505ee873fdaSClaudiu Manoil 	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
506ec21e2ecSJeff Kirsher 
507ec21e2ecSJeff Kirsher 	/* If we aren't the FEC we have multiple interrupts */
508ec21e2ecSJeff Kirsher 	if (model && strcasecmp(model, "FEC")) {
509ee873fdaSClaudiu Manoil 		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
510ee873fdaSClaudiu Manoil 		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
511fea0f665SMark Brown 		if (!gfar_irq(grp, TX)->irq ||
512fea0f665SMark Brown 		    !gfar_irq(grp, RX)->irq ||
513fea0f665SMark Brown 		    !gfar_irq(grp, ER)->irq)
514ec21e2ecSJeff Kirsher 			return -EINVAL;
515ec21e2ecSJeff Kirsher 	}
516ec21e2ecSJeff Kirsher 
5175fedcc14SClaudiu Manoil 	grp->priv = priv;
5185fedcc14SClaudiu Manoil 	spin_lock_init(&grp->grplock);
519ec21e2ecSJeff Kirsher 	if (priv->mode == MQ_MG_MODE) {
52055917641SJingchang Lu 		u32 rxq_mask, txq_mask;
52155917641SJingchang Lu 		int ret;
52255917641SJingchang Lu 
52355917641SJingchang Lu 		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
52455917641SJingchang Lu 		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
52555917641SJingchang Lu 
52655917641SJingchang Lu 		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
52755917641SJingchang Lu 		if (!ret) {
52855917641SJingchang Lu 			grp->rx_bit_map = rxq_mask ?
52955917641SJingchang Lu 			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
53055917641SJingchang Lu 		}
53155917641SJingchang Lu 
53255917641SJingchang Lu 		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
53355917641SJingchang Lu 		if (!ret) {
53455917641SJingchang Lu 			grp->tx_bit_map = txq_mask ?
53555917641SJingchang Lu 			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
53655917641SJingchang Lu 		}
53771ff9e3dSClaudiu Manoil 
53871ff9e3dSClaudiu Manoil 		if (priv->poll_mode == GFAR_SQ_POLLING) {
53971ff9e3dSClaudiu Manoil 			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
54071ff9e3dSClaudiu Manoil 			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
54171ff9e3dSClaudiu Manoil 			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
54271ff9e3dSClaudiu Manoil 		}
543ec21e2ecSJeff Kirsher 	} else {
5445fedcc14SClaudiu Manoil 		grp->rx_bit_map = 0xFF;
5455fedcc14SClaudiu Manoil 		grp->tx_bit_map = 0xFF;
546ec21e2ecSJeff Kirsher 	}
54720862788SClaudiu Manoil 
54820862788SClaudiu Manoil 	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit scans
54920862788SClaudiu Manoil 	 * from bit 0 upwards, so we reverse the 8 bits to get the q index
	 * (e.g. bitrev8 turns a map of 0x80, i.e. q0, into 0x01, whose set
	 * bit is bit 0).
55020862788SClaudiu Manoil 	 */
55120862788SClaudiu Manoil 	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
55220862788SClaudiu Manoil 	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
55320862788SClaudiu Manoil 
55420862788SClaudiu Manoil 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
55520862788SClaudiu Manoil 	 * also assign queues to groups
55620862788SClaudiu Manoil 	 */
55720862788SClaudiu Manoil 	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
55871ff9e3dSClaudiu Manoil 		if (!grp->rx_queue)
55971ff9e3dSClaudiu Manoil 			grp->rx_queue = priv->rx_queue[i];
56020862788SClaudiu Manoil 		grp->num_rx_queues++;
56120862788SClaudiu Manoil 		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
56220862788SClaudiu Manoil 		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
56320862788SClaudiu Manoil 		priv->rx_queue[i]->grp = grp;
56420862788SClaudiu Manoil 	}
56520862788SClaudiu Manoil 
56620862788SClaudiu Manoil 	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
56771ff9e3dSClaudiu Manoil 		if (!grp->tx_queue)
56871ff9e3dSClaudiu Manoil 			grp->tx_queue = priv->tx_queue[i];
56920862788SClaudiu Manoil 		grp->num_tx_queues++;
57020862788SClaudiu Manoil 		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
57120862788SClaudiu Manoil 		priv->tqueue |= (TQUEUE_EN0 >> i);
57220862788SClaudiu Manoil 		priv->tx_queue[i]->grp = grp;
57320862788SClaudiu Manoil 	}
57420862788SClaudiu Manoil 
575ec21e2ecSJeff Kirsher 	priv->num_grps++;
576ec21e2ecSJeff Kirsher 
577ec21e2ecSJeff Kirsher 	return 0;
578ec21e2ecSJeff Kirsher }
579ec21e2ecSJeff Kirsher 
580f50724cdSTobias Waldekranz static int gfar_of_group_count(struct device_node *np)
581f50724cdSTobias Waldekranz {
582f50724cdSTobias Waldekranz 	struct device_node *child;
583f50724cdSTobias Waldekranz 	int num = 0;
584f50724cdSTobias Waldekranz 
585f50724cdSTobias Waldekranz 	for_each_available_child_of_node(np, child)
586bf5849f1SRob Herring 		if (of_node_name_eq(child, "queue-group"))
587f50724cdSTobias Waldekranz 			num++;
588f50724cdSTobias Waldekranz 
589f50724cdSTobias Waldekranz 	return num;
590f50724cdSTobias Waldekranz }
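
/* For reference, a simplified sketch of the device tree being counted
 * above (illustrative only; addresses and interrupt specifiers are
 * board-specific -- see the fsl,etsec2 binding for the real format):
 *
 *	ethernet@b0000 {
 *		compatible = "fsl,etsec2";
 *		...
 *		queue-group@b0000 {
 *			reg = <0xb0000 0x1000>;
 *			interrupts = <...>;
 *		};
 *		queue-group@b4000 {
 *			reg = <0xb4000 0x1000>;
 *			interrupts = <...>;
 *		};
 *	};
 */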
591f50724cdSTobias Waldekranz 
5927d993c5fSArseny Solokha /* Reads the controller's registers to determine what interface
5937d993c5fSArseny Solokha  * connects it to the PHY.
5947d993c5fSArseny Solokha  */
5957d993c5fSArseny Solokha static phy_interface_t gfar_get_interface(struct net_device *dev)
5967d993c5fSArseny Solokha {
5977d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
5987d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
5997d993c5fSArseny Solokha 	u32 ecntrl;
6007d993c5fSArseny Solokha 
6017d993c5fSArseny Solokha 	ecntrl = gfar_read(&regs->ecntrl);
6027d993c5fSArseny Solokha 
6037d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_SGMII_MODE)
6047d993c5fSArseny Solokha 		return PHY_INTERFACE_MODE_SGMII;
6057d993c5fSArseny Solokha 
6067d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_TBI_MODE) {
6077d993c5fSArseny Solokha 		if (ecntrl & ECNTRL_REDUCED_MODE)
6087d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RTBI;
6097d993c5fSArseny Solokha 		else
6107d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_TBI;
6117d993c5fSArseny Solokha 	}
6127d993c5fSArseny Solokha 
6137d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_REDUCED_MODE) {
6147d993c5fSArseny Solokha 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
6157d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RMII;
6167d993c5fSArseny Solokha 		} else {
6187d993c5fSArseny Solokha 			phy_interface_t interface = priv->interface;
6197d993c5fSArseny Solokha 
6207d993c5fSArseny Solokha 			/* This isn't autodetected right now, so it must
6217d993c5fSArseny Solokha 			 * be set by the device tree or platform code.
6227d993c5fSArseny Solokha 			 */
6237d993c5fSArseny Solokha 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
6247d993c5fSArseny Solokha 				return PHY_INTERFACE_MODE_RGMII_ID;
6257d993c5fSArseny Solokha 
6267d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RGMII;
6277d993c5fSArseny Solokha 		}
6287d993c5fSArseny Solokha 	}
6297d993c5fSArseny Solokha 
6307d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
6317d993c5fSArseny Solokha 		return PHY_INTERFACE_MODE_GMII;
6327d993c5fSArseny Solokha 
6337d993c5fSArseny Solokha 	return PHY_INTERFACE_MODE_MII;
6347d993c5fSArseny Solokha }
6357d993c5fSArseny Solokha 
636ec21e2ecSJeff Kirsher static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
637ec21e2ecSJeff Kirsher {
638ec21e2ecSJeff Kirsher 	const char *model;
639ec21e2ecSJeff Kirsher 	const void *mac_addr;
640ec21e2ecSJeff Kirsher 	int err = 0, i;
6410c65b2b9SAndrew Lunn 	phy_interface_t interface;
642ec21e2ecSJeff Kirsher 	struct net_device *dev = NULL;
643ec21e2ecSJeff Kirsher 	struct gfar_private *priv = NULL;
644ec21e2ecSJeff Kirsher 	struct device_node *np = ofdev->dev.of_node;
645ec21e2ecSJeff Kirsher 	struct device_node *child = NULL;
64655917641SJingchang Lu 	u32 stash_len = 0;
64755917641SJingchang Lu 	u32 stash_idx = 0;
648ec21e2ecSJeff Kirsher 	unsigned int num_tx_qs, num_rx_qs;
649b338ce27SClaudiu Manoil 	unsigned short mode, poll_mode;
650ec21e2ecSJeff Kirsher 
6514b222ca6SKevin Hao 	if (!np)
652ec21e2ecSJeff Kirsher 		return -ENODEV;
653ec21e2ecSJeff Kirsher 
654b338ce27SClaudiu Manoil 	if (of_device_is_compatible(np, "fsl,etsec2")) {
655b338ce27SClaudiu Manoil 		mode = MQ_MG_MODE;
656b338ce27SClaudiu Manoil 		poll_mode = GFAR_SQ_POLLING;
657b338ce27SClaudiu Manoil 	} else {
658b338ce27SClaudiu Manoil 		mode = SQ_SG_MODE;
659b338ce27SClaudiu Manoil 		poll_mode = GFAR_SQ_POLLING;
660b338ce27SClaudiu Manoil 	}
661b338ce27SClaudiu Manoil 
662b338ce27SClaudiu Manoil 	if (mode == SQ_SG_MODE) {
66371ff9e3dSClaudiu Manoil 		num_tx_qs = 1;
66471ff9e3dSClaudiu Manoil 		num_rx_qs = 1;
66571ff9e3dSClaudiu Manoil 	} else { /* MQ_MG_MODE */
666c65d7533SClaudiu Manoil 		/* get the actual number of supported groups */
667f50724cdSTobias Waldekranz 		unsigned int num_grps = gfar_of_group_count(np);
668c65d7533SClaudiu Manoil 
669c65d7533SClaudiu Manoil 		if (num_grps == 0 || num_grps > MAXGROUPS) {
670c65d7533SClaudiu Manoil 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
671c65d7533SClaudiu Manoil 				num_grps);
672c65d7533SClaudiu Manoil 			pr_err("Cannot do alloc_etherdev, aborting\n");
673c65d7533SClaudiu Manoil 			return -EINVAL;
674c65d7533SClaudiu Manoil 		}
675c65d7533SClaudiu Manoil 
676b338ce27SClaudiu Manoil 		if (poll_mode == GFAR_SQ_POLLING) {
677c65d7533SClaudiu Manoil 			num_tx_qs = num_grps; /* one txq per int group */
678c65d7533SClaudiu Manoil 			num_rx_qs = num_grps; /* one rxq per int group */
67971ff9e3dSClaudiu Manoil 		} else { /* GFAR_MQ_POLLING */
68055917641SJingchang Lu 			u32 tx_queues, rx_queues;
68155917641SJingchang Lu 			int ret;
68255917641SJingchang Lu 
68355917641SJingchang Lu 			/* parse the num of HW tx and rx queues */
68455917641SJingchang Lu 			ret = of_property_read_u32(np, "fsl,num_tx_queues",
68555917641SJingchang Lu 						   &tx_queues);
68655917641SJingchang Lu 			num_tx_qs = ret ? 1 : tx_queues;
68755917641SJingchang Lu 
68855917641SJingchang Lu 			ret = of_property_read_u32(np, "fsl,num_rx_queues",
68955917641SJingchang Lu 						   &rx_queues);
69055917641SJingchang Lu 			num_rx_qs = ret ? 1 : rx_queues;
69171ff9e3dSClaudiu Manoil 		}
69271ff9e3dSClaudiu Manoil 	}
693ec21e2ecSJeff Kirsher 
694ec21e2ecSJeff Kirsher 	if (num_tx_qs > MAX_TX_QS) {
695ec21e2ecSJeff Kirsher 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
696ec21e2ecSJeff Kirsher 		       num_tx_qs, MAX_TX_QS);
697ec21e2ecSJeff Kirsher 		pr_err("Cannot do alloc_etherdev, aborting\n");
698ec21e2ecSJeff Kirsher 		return -EINVAL;
699ec21e2ecSJeff Kirsher 	}
700ec21e2ecSJeff Kirsher 
701ec21e2ecSJeff Kirsher 	if (num_rx_qs > MAX_RX_QS) {
702ec21e2ecSJeff Kirsher 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
703ec21e2ecSJeff Kirsher 		       num_rx_qs, MAX_RX_QS);
704ec21e2ecSJeff Kirsher 		pr_err("Cannot do alloc_etherdev, aborting\n");
705ec21e2ecSJeff Kirsher 		return -EINVAL;
706ec21e2ecSJeff Kirsher 	}
707ec21e2ecSJeff Kirsher 
708ec21e2ecSJeff Kirsher 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
709ec21e2ecSJeff Kirsher 	dev = *pdev;
710ec21e2ecSJeff Kirsher 	if (!dev)
711ec21e2ecSJeff Kirsher 		return -ENOMEM;
712ec21e2ecSJeff Kirsher 
713ec21e2ecSJeff Kirsher 	priv = netdev_priv(dev);
714ec21e2ecSJeff Kirsher 	priv->ndev = dev;
715ec21e2ecSJeff Kirsher 
716b338ce27SClaudiu Manoil 	priv->mode = mode;
717b338ce27SClaudiu Manoil 	priv->poll_mode = poll_mode;
718b338ce27SClaudiu Manoil 
719ec21e2ecSJeff Kirsher 	priv->num_tx_queues = num_tx_qs;
720ec21e2ecSJeff Kirsher 	netif_set_real_num_rx_queues(dev, num_rx_qs);
721ec21e2ecSJeff Kirsher 	priv->num_rx_queues = num_rx_qs;
72220862788SClaudiu Manoil 
72320862788SClaudiu Manoil 	err = gfar_alloc_tx_queues(priv);
72420862788SClaudiu Manoil 	if (err)
72520862788SClaudiu Manoil 		goto tx_alloc_failed;
72620862788SClaudiu Manoil 
72720862788SClaudiu Manoil 	err = gfar_alloc_rx_queues(priv);
72820862788SClaudiu Manoil 	if (err)
72920862788SClaudiu Manoil 		goto rx_alloc_failed;
730ec21e2ecSJeff Kirsher 
73155917641SJingchang Lu 	err = of_property_read_string(np, "model", &model);
73255917641SJingchang Lu 	if (err) {
73355917641SJingchang Lu 		pr_err("Device model property missing, aborting\n");
73455917641SJingchang Lu 		goto rx_alloc_failed;
73555917641SJingchang Lu 	}
73655917641SJingchang Lu 
737ec21e2ecSJeff Kirsher 	/* Init Rx queue filer rule set linked list */
738ec21e2ecSJeff Kirsher 	INIT_LIST_HEAD(&priv->rx_list.list);
739ec21e2ecSJeff Kirsher 	priv->rx_list.count = 0;
740ec21e2ecSJeff Kirsher 	mutex_init(&priv->rx_queue_access);
741ec21e2ecSJeff Kirsher 
742ec21e2ecSJeff Kirsher 	for (i = 0; i < MAXGROUPS; i++)
743ec21e2ecSJeff Kirsher 		priv->gfargrp[i].regs = NULL;
744ec21e2ecSJeff Kirsher 
745ec21e2ecSJeff Kirsher 	/* Parse and initialize group specific information */
746b338ce27SClaudiu Manoil 	if (priv->mode == MQ_MG_MODE) {
747f50724cdSTobias Waldekranz 		for_each_available_child_of_node(np, child) {
748bf5849f1SRob Herring 			if (!of_node_name_eq(child, "queue-group"))
749f50724cdSTobias Waldekranz 				continue;
750f50724cdSTobias Waldekranz 
751ec21e2ecSJeff Kirsher 			err = gfar_parse_group(child, priv, model);
752989e4da0SSumera Priyadarsini 			if (err) {
753989e4da0SSumera Priyadarsini 				of_node_put(child);
754ec21e2ecSJeff Kirsher 				goto err_grp_init;
755ec21e2ecSJeff Kirsher 			}
756989e4da0SSumera Priyadarsini 		}
757b338ce27SClaudiu Manoil 	} else { /* SQ_SG_MODE */
758ec21e2ecSJeff Kirsher 		err = gfar_parse_group(np, priv, model);
759ec21e2ecSJeff Kirsher 		if (err)
760ec21e2ecSJeff Kirsher 			goto err_grp_init;
761ec21e2ecSJeff Kirsher 	}
762ec21e2ecSJeff Kirsher 
7633f8c0f7eSSaurabh Sengar 	if (of_property_read_bool(np, "bd-stash")) {
764ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
765ec21e2ecSJeff Kirsher 		priv->bd_stash_en = 1;
766ec21e2ecSJeff Kirsher 	}
767ec21e2ecSJeff Kirsher 
76855917641SJingchang Lu 	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
769ec21e2ecSJeff Kirsher 
77055917641SJingchang Lu 	if (err == 0)
77155917641SJingchang Lu 		priv->rx_stash_size = stash_len;
772ec21e2ecSJeff Kirsher 
77355917641SJingchang Lu 	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
774ec21e2ecSJeff Kirsher 
77555917641SJingchang Lu 	if (err == 0)
77655917641SJingchang Lu 		priv->rx_stash_index = stash_idx;
777ec21e2ecSJeff Kirsher 
778ec21e2ecSJeff Kirsher 	if (stash_len || stash_idx)
779ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
780ec21e2ecSJeff Kirsher 
781ec21e2ecSJeff Kirsher 	mac_addr = of_get_mac_address(np);
782bc4598bcSJan Ceuleers 
783ff021f22SMaxim Kochetkov 	if (!IS_ERR(mac_addr)) {
7842d2924afSPetr Štetiar 		ether_addr_copy(dev->dev_addr, mac_addr);
785ff021f22SMaxim Kochetkov 	} else {
786ff021f22SMaxim Kochetkov 		eth_hw_addr_random(dev);
787ff021f22SMaxim Kochetkov 		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
788ff021f22SMaxim Kochetkov 	}
789ec21e2ecSJeff Kirsher 
790ec21e2ecSJeff Kirsher 	if (model && !strcasecmp(model, "TSEC"))
79134018fd4SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
792ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_COALESCE |
793ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_RMON |
794ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
795bc4598bcSJan Ceuleers 
796ec21e2ecSJeff Kirsher 	if (model && !strcasecmp(model, "eTSEC"))
79734018fd4SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
798ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_COALESCE |
799ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_RMON |
800ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
801ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_CSUM |
802ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_VLAN |
803ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
804ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
8057bff47daSHamish Martin 				     FSL_GIANFAR_DEV_HAS_TIMER |
8067bff47daSHamish Martin 				     FSL_GIANFAR_DEV_HAS_RX_FILER;
807ec21e2ecSJeff Kirsher 
8088e578e73SArseny Solokha 	/* Use PHY connection type from the DT node if one is specified there.
8098e578e73SArseny Solokha 	 * rgmii-id really needs to be specified. Other types can be
8108e578e73SArseny Solokha 	 * detected by hardware
8118e578e73SArseny Solokha 	 */
8120c65b2b9SAndrew Lunn 	err = of_get_phy_mode(np, &interface);
8130c65b2b9SAndrew Lunn 	if (!err)
8140c65b2b9SAndrew Lunn 		priv->interface = interface;
815ec21e2ecSJeff Kirsher 	else
8168e578e73SArseny Solokha 		priv->interface = gfar_get_interface(dev);
817ec21e2ecSJeff Kirsher 
81855917641SJingchang Lu 	if (of_find_property(np, "fsl,magic-packet", NULL))
819ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
820ec21e2ecSJeff Kirsher 
8213e905b80SClaudiu Manoil 	if (of_get_property(np, "fsl,wake-on-filer", NULL))
8223e905b80SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
8233e905b80SClaudiu Manoil 
824ec21e2ecSJeff Kirsher 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
825ec21e2ecSJeff Kirsher 
826be403645SFlorian Fainelli 	/* In the case of a fixed PHY, the DT node associated
827be403645SFlorian Fainelli 	 * to the PHY is the Ethernet MAC DT node.
828be403645SFlorian Fainelli 	 */
8296f2c9bd8SUwe Kleine-König 	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
830be403645SFlorian Fainelli 		err = of_phy_register_fixed_link(np);
831be403645SFlorian Fainelli 		if (err)
832be403645SFlorian Fainelli 			goto err_grp_init;
833be403645SFlorian Fainelli 
8346f2c9bd8SUwe Kleine-König 		priv->phy_node = of_node_get(np);
835be403645SFlorian Fainelli 	}
836be403645SFlorian Fainelli 
837ec21e2ecSJeff Kirsher 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
838ec21e2ecSJeff Kirsher 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
839ec21e2ecSJeff Kirsher 
840ec21e2ecSJeff Kirsher 	return 0;
841ec21e2ecSJeff Kirsher 
842ec21e2ecSJeff Kirsher err_grp_init:
843ec21e2ecSJeff Kirsher 	unmap_group_regs(priv);
84420862788SClaudiu Manoil rx_alloc_failed:
84520862788SClaudiu Manoil 	gfar_free_rx_queues(priv);
84620862788SClaudiu Manoil tx_alloc_failed:
84720862788SClaudiu Manoil 	gfar_free_tx_queues(priv);
848ee873fdaSClaudiu Manoil 	free_gfar_dev(priv);
849ec21e2ecSJeff Kirsher 	return err;
850ec21e2ecSJeff Kirsher }
851ec21e2ecSJeff Kirsher 
852ec21e2ecSJeff Kirsher static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
853ec21e2ecSJeff Kirsher 				   u32 class)
854ec21e2ecSJeff Kirsher {
855ec21e2ecSJeff Kirsher 	u32 rqfpr = FPR_FILER_MASK;
856ec21e2ecSJeff Kirsher 	u32 rqfcr = 0x0;
857ec21e2ecSJeff Kirsher 
858ec21e2ecSJeff Kirsher 	rqfar--;
859ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
860ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
861ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
862ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
863ec21e2ecSJeff Kirsher 
864ec21e2ecSJeff Kirsher 	rqfar--;
865ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_NOMATCH;
866ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
867ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
868ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
869ec21e2ecSJeff Kirsher 
870ec21e2ecSJeff Kirsher 	rqfar--;
871ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
872ec21e2ecSJeff Kirsher 	rqfpr = class;
873ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
874ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
875ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
876ec21e2ecSJeff Kirsher 
877ec21e2ecSJeff Kirsher 	rqfar--;
878ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
879ec21e2ecSJeff Kirsher 	rqfpr = class;
880ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
881ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
882ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
883ec21e2ecSJeff Kirsher 
884ec21e2ecSJeff Kirsher 	return rqfar;
885ec21e2ecSJeff Kirsher }
886ec21e2ecSJeff Kirsher 
887ec21e2ecSJeff Kirsher static void gfar_init_filer_table(struct gfar_private *priv)
888ec21e2ecSJeff Kirsher {
889ec21e2ecSJeff Kirsher 	int i = 0x0;
890ec21e2ecSJeff Kirsher 	u32 rqfar = MAX_FILER_IDX;
891ec21e2ecSJeff Kirsher 	u32 rqfcr = 0x0;
892ec21e2ecSJeff Kirsher 	u32 rqfpr = FPR_FILER_MASK;
893ec21e2ecSJeff Kirsher 
894ec21e2ecSJeff Kirsher 	/* Default rule */
895ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_MATCH;
896ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
897ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
898ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
899ec21e2ecSJeff Kirsher 
900ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
901ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
902ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
903ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
904ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
905ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
906ec21e2ecSJeff Kirsher 
907ec21e2ecSJeff Kirsher 	/* cur_filer_idx indicates the first non-masked rule */
908ec21e2ecSJeff Kirsher 	priv->cur_filer_idx = rqfar;
909ec21e2ecSJeff Kirsher 
910ec21e2ecSJeff Kirsher 	/* Rest are masked rules */
911ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_NOMATCH;
912ec21e2ecSJeff Kirsher 	for (i = 0; i < rqfar; i++) {
913ec21e2ecSJeff Kirsher 		priv->ftp_rqfcr[i] = rqfcr;
914ec21e2ecSJeff Kirsher 		priv->ftp_rqfpr[i] = rqfpr;
915ec21e2ecSJeff Kirsher 		gfar_write_filer(priv, i, rqfcr, rqfpr);
916ec21e2ecSJeff Kirsher 	}
917ec21e2ecSJeff Kirsher }
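
/* Net effect of the above (sketch): the default match-all rule sits at
 * MAX_FILER_IDX, six small per-class clusters (IPv6 and IPv4, each plain
 * and with UDP/TCP variants) sit just below it, and every entry below
 * cur_filer_idx is a no-match placeholder.
 */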
918ec21e2ecSJeff Kirsher 
919d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
9202969b1f7SClaudiu Manoil static void __gfar_detect_errata_83xx(struct gfar_private *priv)
921ec21e2ecSJeff Kirsher {
922ec21e2ecSJeff Kirsher 	unsigned int pvr = mfspr(SPRN_PVR);
923ec21e2ecSJeff Kirsher 	unsigned int svr = mfspr(SPRN_SVR);
924ec21e2ecSJeff Kirsher 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
925ec21e2ecSJeff Kirsher 	unsigned int rev = svr & 0xffff;
926ec21e2ecSJeff Kirsher 
927ec21e2ecSJeff Kirsher 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
928ec21e2ecSJeff Kirsher 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
929ec21e2ecSJeff Kirsher 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
930ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_74;
931ec21e2ecSJeff Kirsher 
932ec21e2ecSJeff Kirsher 	/* MPC8313 and MPC837x all rev */
933ec21e2ecSJeff Kirsher 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
934ec21e2ecSJeff Kirsher 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
935ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_76;
936ec21e2ecSJeff Kirsher 
9372969b1f7SClaudiu Manoil 	/* MPC8313 Rev < 2.0 */
9382969b1f7SClaudiu Manoil 	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
939ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_12;
9402969b1f7SClaudiu Manoil }
9412969b1f7SClaudiu Manoil 
9422969b1f7SClaudiu Manoil static void __gfar_detect_errata_85xx(struct gfar_private *priv)
9432969b1f7SClaudiu Manoil {
9442969b1f7SClaudiu Manoil 	unsigned int svr = mfspr(SPRN_SVR);
9452969b1f7SClaudiu Manoil 
9462969b1f7SClaudiu Manoil 	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
9472969b1f7SClaudiu Manoil 		priv->errata |= GFAR_ERRATA_12;
9487bfc6082SAtsushi Nemoto 	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
94953fad773SClaudiu Manoil 	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
9507bfc6082SAtsushi Nemoto 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
9517bfc6082SAtsushi Nemoto 	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
95253fad773SClaudiu Manoil 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
9532969b1f7SClaudiu Manoil }
954d6ef0bccSClaudiu Manoil #endif
9552969b1f7SClaudiu Manoil 
9562969b1f7SClaudiu Manoil static void gfar_detect_errata(struct gfar_private *priv)
9572969b1f7SClaudiu Manoil {
9582969b1f7SClaudiu Manoil 	struct device *dev = &priv->ofdev->dev;
9592969b1f7SClaudiu Manoil 
9602969b1f7SClaudiu Manoil 	/* no plans to fix */
9612969b1f7SClaudiu Manoil 	priv->errata |= GFAR_ERRATA_A002;
9622969b1f7SClaudiu Manoil 
963d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
9642969b1f7SClaudiu Manoil 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
9652969b1f7SClaudiu Manoil 		__gfar_detect_errata_85xx(priv);
9662969b1f7SClaudiu Manoil 	else /* non-mpc85xx parts, i.e. e300 core based */
9672969b1f7SClaudiu Manoil 		__gfar_detect_errata_83xx(priv);
968d6ef0bccSClaudiu Manoil #endif
969ec21e2ecSJeff Kirsher 
970ec21e2ecSJeff Kirsher 	if (priv->errata)
971ec21e2ecSJeff Kirsher 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
972ec21e2ecSJeff Kirsher 			 priv->errata);
973ec21e2ecSJeff Kirsher }
974ec21e2ecSJeff Kirsher 
975898157edSXiubo Li static void gfar_init_addr_hash_table(struct gfar_private *priv)
97620862788SClaudiu Manoil {
97720862788SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
978ec21e2ecSJeff Kirsher 
979ec21e2ecSJeff Kirsher 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
980ec21e2ecSJeff Kirsher 		priv->extended_hash = 1;
981ec21e2ecSJeff Kirsher 		priv->hash_width = 9;
982ec21e2ecSJeff Kirsher 
983ec21e2ecSJeff Kirsher 		priv->hash_regs[0] = &regs->igaddr0;
984ec21e2ecSJeff Kirsher 		priv->hash_regs[1] = &regs->igaddr1;
985ec21e2ecSJeff Kirsher 		priv->hash_regs[2] = &regs->igaddr2;
986ec21e2ecSJeff Kirsher 		priv->hash_regs[3] = &regs->igaddr3;
987ec21e2ecSJeff Kirsher 		priv->hash_regs[4] = &regs->igaddr4;
988ec21e2ecSJeff Kirsher 		priv->hash_regs[5] = &regs->igaddr5;
989ec21e2ecSJeff Kirsher 		priv->hash_regs[6] = &regs->igaddr6;
990ec21e2ecSJeff Kirsher 		priv->hash_regs[7] = &regs->igaddr7;
991ec21e2ecSJeff Kirsher 		priv->hash_regs[8] = &regs->gaddr0;
992ec21e2ecSJeff Kirsher 		priv->hash_regs[9] = &regs->gaddr1;
993ec21e2ecSJeff Kirsher 		priv->hash_regs[10] = &regs->gaddr2;
994ec21e2ecSJeff Kirsher 		priv->hash_regs[11] = &regs->gaddr3;
995ec21e2ecSJeff Kirsher 		priv->hash_regs[12] = &regs->gaddr4;
996ec21e2ecSJeff Kirsher 		priv->hash_regs[13] = &regs->gaddr5;
997ec21e2ecSJeff Kirsher 		priv->hash_regs[14] = &regs->gaddr6;
998ec21e2ecSJeff Kirsher 		priv->hash_regs[15] = &regs->gaddr7;
999ec21e2ecSJeff Kirsher 
1000ec21e2ecSJeff Kirsher 	} else {
1001ec21e2ecSJeff Kirsher 		priv->extended_hash = 0;
1002ec21e2ecSJeff Kirsher 		priv->hash_width = 8;
1003ec21e2ecSJeff Kirsher 
1004ec21e2ecSJeff Kirsher 		priv->hash_regs[0] = &regs->gaddr0;
1005ec21e2ecSJeff Kirsher 		priv->hash_regs[1] = &regs->gaddr1;
1006ec21e2ecSJeff Kirsher 		priv->hash_regs[2] = &regs->gaddr2;
1007ec21e2ecSJeff Kirsher 		priv->hash_regs[3] = &regs->gaddr3;
1008ec21e2ecSJeff Kirsher 		priv->hash_regs[4] = &regs->gaddr4;
1009ec21e2ecSJeff Kirsher 		priv->hash_regs[5] = &regs->gaddr5;
1010ec21e2ecSJeff Kirsher 		priv->hash_regs[6] = &regs->gaddr6;
1011ec21e2ecSJeff Kirsher 		priv->hash_regs[7] = &regs->gaddr7;
1012ec21e2ecSJeff Kirsher 	}
101320862788SClaudiu Manoil }
101420862788SClaudiu Manoil 
1015ec21e2ecSJeff Kirsher static int __gfar_is_rx_idle(struct gfar_private *priv)
1016ec21e2ecSJeff Kirsher {
1017ec21e2ecSJeff Kirsher 	u32 res;
1018ec21e2ecSJeff Kirsher 
10190977f817SJan Ceuleers 	/* Normally, TSEC should not hang on GRS commands, so we should
1020ec21e2ecSJeff Kirsher 	 * actually wait for IEVENT_GRSC flag.
1021ec21e2ecSJeff Kirsher 	 */
1022ad3660c2SClaudiu Manoil 	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1023ec21e2ecSJeff Kirsher 		return 0;
1024ec21e2ecSJeff Kirsher 
10250977f817SJan Ceuleers 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1026ec21e2ecSJeff Kirsher 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1027ec21e2ecSJeff Kirsher 	 * and the Rx can be safely reset.
1028ec21e2ecSJeff Kirsher 	 */
1029ec21e2ecSJeff Kirsher 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
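	/* The 0x7f80 mask keeps bits 7-14 of each 16-bit half, so the
	 * comparison below checks bits 7-14 against bits 23-30.
	 */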
1030ec21e2ecSJeff Kirsher 	res &= 0x7f807f80;
1031ec21e2ecSJeff Kirsher 	if ((res & 0xffff) == (res >> 16))
1032ec21e2ecSJeff Kirsher 		return 1;
1033ec21e2ecSJeff Kirsher 
1034ec21e2ecSJeff Kirsher 	return 0;
1035ec21e2ecSJeff Kirsher }
1036ec21e2ecSJeff Kirsher 
1037ec21e2ecSJeff Kirsher /* Halt the receive and transmit queues */
1038c10650b6SClaudiu Manoil static void gfar_halt_nodisable(struct gfar_private *priv)
1039ec21e2ecSJeff Kirsher {
1040efeddce7SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1041ec21e2ecSJeff Kirsher 	u32 tempval;
1042a4feee89SClaudiu Manoil 	unsigned int timeout;
1043a4feee89SClaudiu Manoil 	int stopped;
1044ec21e2ecSJeff Kirsher 
1045efeddce7SClaudiu Manoil 	gfar_ints_disable(priv);
1046ec21e2ecSJeff Kirsher 
1047a4feee89SClaudiu Manoil 	if (gfar_is_dma_stopped(priv))
1048a4feee89SClaudiu Manoil 		return;
1049a4feee89SClaudiu Manoil 
1050ec21e2ecSJeff Kirsher 	/* Stop the DMA, and wait for it to stop */
1051ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1052ec21e2ecSJeff Kirsher 	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1053ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
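	/* GRS/GTS request a graceful Rx/Tx stop; completion is reported
	 * through the IEVENT GRSC/GTSC bits, which gfar_is_dma_stopped()
	 * polls for below.
	 */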
1054ec21e2ecSJeff Kirsher 
1055a4feee89SClaudiu Manoil retry:
1056a4feee89SClaudiu Manoil 	timeout = 1000;
1057a4feee89SClaudiu Manoil 	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1058a4feee89SClaudiu Manoil 		cpu_relax();
1059a4feee89SClaudiu Manoil 		timeout--;
1060ec21e2ecSJeff Kirsher 	}
1061a4feee89SClaudiu Manoil 
1062a4feee89SClaudiu Manoil 	if (!timeout)
1063a4feee89SClaudiu Manoil 		stopped = gfar_is_dma_stopped(priv);
1064a4feee89SClaudiu Manoil 
1065a4feee89SClaudiu Manoil 	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1066a4feee89SClaudiu Manoil 	    !__gfar_is_rx_idle(priv))
1067a4feee89SClaudiu Manoil 		goto retry;
1068ec21e2ecSJeff Kirsher }
1069ec21e2ecSJeff Kirsher 
1070ec21e2ecSJeff Kirsher /* Halt the receive and transmit queues */
10717ad38784SArseny Solokha static void gfar_halt(struct gfar_private *priv)
1072ec21e2ecSJeff Kirsher {
1073ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1074ec21e2ecSJeff Kirsher 	u32 tempval;
1075ec21e2ecSJeff Kirsher 
1076c10650b6SClaudiu Manoil 	/* Disable the Rx/Tx hw queues */
1077c10650b6SClaudiu Manoil 	gfar_write(&regs->rqueue, 0);
1078c10650b6SClaudiu Manoil 	gfar_write(&regs->tqueue, 0);
1079ec21e2ecSJeff Kirsher 
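	/* Settle time before the graceful stop (presumably to let any
	 * in-flight traffic drain; the 10 ms value looks empirical).
	 */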
1080c10650b6SClaudiu Manoil 	mdelay(10);
1081c10650b6SClaudiu Manoil 
1082c10650b6SClaudiu Manoil 	gfar_halt_nodisable(priv);
1083c10650b6SClaudiu Manoil 
1084c10650b6SClaudiu Manoil 	/* Disable Rx/Tx DMA */
1085ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->maccfg1);
1086ec21e2ecSJeff Kirsher 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1087ec21e2ecSJeff Kirsher 	gfar_write(&regs->maccfg1, tempval);
1088ec21e2ecSJeff Kirsher }
1089ec21e2ecSJeff Kirsher 
1090ec21e2ecSJeff Kirsher static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1091ec21e2ecSJeff Kirsher {
1092ec21e2ecSJeff Kirsher 	struct txbd8 *txbdp;
1093ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1094ec21e2ecSJeff Kirsher 	int i, j;
1095ec21e2ecSJeff Kirsher 
1096ec21e2ecSJeff Kirsher 	txbdp = tx_queue->tx_bd_base;
1097ec21e2ecSJeff Kirsher 
1098ec21e2ecSJeff Kirsher 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1099ec21e2ecSJeff Kirsher 		if (!tx_queue->tx_skbuff[i])
1100ec21e2ecSJeff Kirsher 			continue;
1101ec21e2ecSJeff Kirsher 
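		/* The first BD maps the skb's linear part; the following
		 * nr_frags BDs map page fragments.
		 */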
1102a7312d58SClaudiu Manoil 		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1103a7312d58SClaudiu Manoil 				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1104ec21e2ecSJeff Kirsher 		txbdp->lstatus = 0;
1105ec21e2ecSJeff Kirsher 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1106ec21e2ecSJeff Kirsher 		     j++) {
1107ec21e2ecSJeff Kirsher 			txbdp++;
1108a7312d58SClaudiu Manoil 			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1109a7312d58SClaudiu Manoil 				       be16_to_cpu(txbdp->length),
1110a7312d58SClaudiu Manoil 				       DMA_TO_DEVICE);
1111ec21e2ecSJeff Kirsher 		}
1112ec21e2ecSJeff Kirsher 		txbdp++;
1113ec21e2ecSJeff Kirsher 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1114ec21e2ecSJeff Kirsher 		tx_queue->tx_skbuff[i] = NULL;
1115ec21e2ecSJeff Kirsher 	}
1116ec21e2ecSJeff Kirsher 	kfree(tx_queue->tx_skbuff);
11171eb8f7a7SClaudiu Manoil 	tx_queue->tx_skbuff = NULL;
1118ec21e2ecSJeff Kirsher }
1119ec21e2ecSJeff Kirsher 
1120ec21e2ecSJeff Kirsher static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1121ec21e2ecSJeff Kirsher {
1122ec21e2ecSJeff Kirsher 	int i;
1123ec21e2ecSJeff Kirsher 
112475354148SClaudiu Manoil 	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
112575354148SClaudiu Manoil 
112675354148SClaudiu Manoil 	dev_kfree_skb(rx_queue->skb);
1127ec21e2ecSJeff Kirsher 
1128ec21e2ecSJeff Kirsher 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
112975354148SClaudiu Manoil 		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
113075354148SClaudiu Manoil 
1131ec21e2ecSJeff Kirsher 		rxbdp->lstatus = 0;
1132ec21e2ecSJeff Kirsher 		rxbdp->bufPtr = 0;
1133ec21e2ecSJeff Kirsher 		rxbdp++;
113475354148SClaudiu Manoil 
113575354148SClaudiu Manoil 		if (!rxb->page)
113675354148SClaudiu Manoil 			continue;
113775354148SClaudiu Manoil 
11384af0e5bbSArseny Solokha 		dma_unmap_page(rx_queue->dev, rxb->dma,
113975354148SClaudiu Manoil 			       PAGE_SIZE, DMA_FROM_DEVICE);
114075354148SClaudiu Manoil 		__free_page(rxb->page);
114175354148SClaudiu Manoil 
114275354148SClaudiu Manoil 		rxb->page = NULL;
1143ec21e2ecSJeff Kirsher 	}
114475354148SClaudiu Manoil 
114575354148SClaudiu Manoil 	kfree(rx_queue->rx_buff);
114675354148SClaudiu Manoil 	rx_queue->rx_buff = NULL;
1147ec21e2ecSJeff Kirsher }
1148ec21e2ecSJeff Kirsher 
1149ec21e2ecSJeff Kirsher /* If there are any tx skbs or rx skbs still around, free them.
11500977f817SJan Ceuleers  * Then free the tx_skbuff and rx_buff arrays
11510977f817SJan Ceuleers  */
1152ec21e2ecSJeff Kirsher static void free_skb_resources(struct gfar_private *priv)
1153ec21e2ecSJeff Kirsher {
1154ec21e2ecSJeff Kirsher 	struct gfar_priv_tx_q *tx_queue = NULL;
1155ec21e2ecSJeff Kirsher 	struct gfar_priv_rx_q *rx_queue = NULL;
1156ec21e2ecSJeff Kirsher 	int i;
1157ec21e2ecSJeff Kirsher 
1158ec21e2ecSJeff Kirsher 	/* Go through all the buffer descriptors and free their data buffers */
1159ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
1160d8a0f1b0SPaul Gortmaker 		struct netdev_queue *txq;
1161bc4598bcSJan Ceuleers 
1162ec21e2ecSJeff Kirsher 		tx_queue = priv->tx_queue[i];
1163d8a0f1b0SPaul Gortmaker 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1164ec21e2ecSJeff Kirsher 		if (tx_queue->tx_skbuff)
1165ec21e2ecSJeff Kirsher 			free_skb_tx_queue(tx_queue);
1166d8a0f1b0SPaul Gortmaker 		netdev_tx_reset_queue(txq);
1167ec21e2ecSJeff Kirsher 	}
1168ec21e2ecSJeff Kirsher 
1169ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
1170ec21e2ecSJeff Kirsher 		rx_queue = priv->rx_queue[i];
117175354148SClaudiu Manoil 		if (rx_queue->rx_buff)
1172ec21e2ecSJeff Kirsher 			free_skb_rx_queue(rx_queue);
1173ec21e2ecSJeff Kirsher 	}
1174ec21e2ecSJeff Kirsher 
1175369ec162SClaudiu Manoil 	dma_free_coherent(priv->dev,
1176ec21e2ecSJeff Kirsher 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1177ec21e2ecSJeff Kirsher 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1178ec21e2ecSJeff Kirsher 			  priv->tx_queue[0]->tx_bd_base,
1179ec21e2ecSJeff Kirsher 			  priv->tx_queue[0]->tx_bd_dma_base);
1180ec21e2ecSJeff Kirsher }
1181ec21e2ecSJeff Kirsher 
11827d993c5fSArseny Solokha void stop_gfar(struct net_device *dev)
11837d993c5fSArseny Solokha {
11847d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
11857d993c5fSArseny Solokha 
11867d993c5fSArseny Solokha 	netif_tx_stop_all_queues(dev);
11877d993c5fSArseny Solokha 
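	/* Order the GFAR_DOWN store against surrounding accesses so other
	 * readers of priv->state (e.g. napi pollers) observe the shutdown.
	 */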
11887d993c5fSArseny Solokha 	smp_mb__before_atomic();
11897d993c5fSArseny Solokha 	set_bit(GFAR_DOWN, &priv->state);
11907d993c5fSArseny Solokha 	smp_mb__after_atomic();
11917d993c5fSArseny Solokha 
11927d993c5fSArseny Solokha 	disable_napi(priv);
11937d993c5fSArseny Solokha 
11947d993c5fSArseny Solokha 	/* disable ints and gracefully shut down Rx/Tx DMA */
11957d993c5fSArseny Solokha 	gfar_halt(priv);
11967d993c5fSArseny Solokha 
11977d993c5fSArseny Solokha 	phy_stop(dev->phydev);
11987d993c5fSArseny Solokha 
11997d993c5fSArseny Solokha 	free_skb_resources(priv);
12007d993c5fSArseny Solokha }
12017d993c5fSArseny Solokha 
12027ad38784SArseny Solokha static void gfar_start(struct gfar_private *priv)
1203ec21e2ecSJeff Kirsher {
1204ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1205ec21e2ecSJeff Kirsher 	u32 tempval;
1206ec21e2ecSJeff Kirsher 	int i = 0;
1207ec21e2ecSJeff Kirsher 
1208c10650b6SClaudiu Manoil 	/* Enable Rx/Tx hw queues */
1209c10650b6SClaudiu Manoil 	gfar_write(&regs->rqueue, priv->rqueue);
1210c10650b6SClaudiu Manoil 	gfar_write(&regs->tqueue, priv->tqueue);
1211ec21e2ecSJeff Kirsher 
1212ec21e2ecSJeff Kirsher 	/* Initialize DMACTRL to have WWR and WOP */
1213ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1214ec21e2ecSJeff Kirsher 	tempval |= DMACTRL_INIT_SETTINGS;
1215ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
1216ec21e2ecSJeff Kirsher 
1217ec21e2ecSJeff Kirsher 	/* Make sure we aren't stopped */
1218ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1219ec21e2ecSJeff Kirsher 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1220ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
1221ec21e2ecSJeff Kirsher 
1222ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_grps; i++) {
1223ec21e2ecSJeff Kirsher 		regs = priv->gfargrp[i].regs;
1224ec21e2ecSJeff Kirsher 		/* Clear THLT/RHLT, so that the DMA starts polling now */
1225ec21e2ecSJeff Kirsher 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1226ec21e2ecSJeff Kirsher 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1227ec21e2ecSJeff Kirsher 	}
1228ec21e2ecSJeff Kirsher 
1229c10650b6SClaudiu Manoil 	/* Enable Rx/Tx DMA */
1230c10650b6SClaudiu Manoil 	tempval = gfar_read(&regs->maccfg1);
1231c10650b6SClaudiu Manoil 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1232c10650b6SClaudiu Manoil 	gfar_write(&regs->maccfg1, tempval);
1233c10650b6SClaudiu Manoil 
1234efeddce7SClaudiu Manoil 	gfar_ints_enable(priv);
1235efeddce7SClaudiu Manoil 
1236860e9538SFlorian Westphal 	netif_trans_update(priv->ndev); /* prevent tx timeout */
1237ec21e2ecSJeff Kirsher }
1238ec21e2ecSJeff Kirsher 
12397d993c5fSArseny Solokha static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
124080ec396cSClaudiu Manoil {
12417d993c5fSArseny Solokha 	struct page *page;
12427d993c5fSArseny Solokha 	dma_addr_t addr;
12437d993c5fSArseny Solokha 
12447d993c5fSArseny Solokha 	page = dev_alloc_page();
12457d993c5fSArseny Solokha 	if (unlikely(!page))
12467d993c5fSArseny Solokha 		return false;
12477d993c5fSArseny Solokha 
12487d993c5fSArseny Solokha 	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
12497d993c5fSArseny Solokha 	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
12507d993c5fSArseny Solokha 		__free_page(page);
12517d993c5fSArseny Solokha 
12527d993c5fSArseny Solokha 		return false;
125380ec396cSClaudiu Manoil 	}
125480ec396cSClaudiu Manoil 
12557d993c5fSArseny Solokha 	rxb->dma = addr;
12567d993c5fSArseny Solokha 	rxb->page = page;
12577d993c5fSArseny Solokha 	rxb->page_offset = 0;
12587d993c5fSArseny Solokha 
12597d993c5fSArseny Solokha 	return true;
12607d993c5fSArseny Solokha }
12617d993c5fSArseny Solokha 
12627d993c5fSArseny Solokha static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1263ec21e2ecSJeff Kirsher {
12647d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
12657d993c5fSArseny Solokha 	struct gfar_extra_stats *estats = &priv->extra_stats;
1266ec21e2ecSJeff Kirsher 
12677d993c5fSArseny Solokha 	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
12687d993c5fSArseny Solokha 	atomic64_inc(&estats->rx_alloc_err);
1269ec21e2ecSJeff Kirsher }
1270ec21e2ecSJeff Kirsher 
12717d993c5fSArseny Solokha static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
12727d993c5fSArseny Solokha 				int alloc_cnt)
127380ec396cSClaudiu Manoil {
12747d993c5fSArseny Solokha 	struct rxbd8 *bdp;
12757d993c5fSArseny Solokha 	struct gfar_rx_buff *rxb;
127680ec396cSClaudiu Manoil 	int i;
127780ec396cSClaudiu Manoil 
12787d993c5fSArseny Solokha 	i = rx_queue->next_to_use;
12797d993c5fSArseny Solokha 	bdp = &rx_queue->rx_bd_base[i];
12807d993c5fSArseny Solokha 	rxb = &rx_queue->rx_buff[i];
12817d993c5fSArseny Solokha 
12827d993c5fSArseny Solokha 	while (alloc_cnt--) {
12837d993c5fSArseny Solokha 		/* try to reuse the page */
12847d993c5fSArseny Solokha 		if (unlikely(!rxb->page)) {
12857d993c5fSArseny Solokha 			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
12867d993c5fSArseny Solokha 				gfar_rx_alloc_err(rx_queue);
12877d993c5fSArseny Solokha 				break;
128880ec396cSClaudiu Manoil 			}
128980ec396cSClaudiu Manoil 		}
129080ec396cSClaudiu Manoil 
12917d993c5fSArseny Solokha 		/* Setup the new RxBD */
12927d993c5fSArseny Solokha 		gfar_init_rxbdp(rx_queue, bdp,
12937d993c5fSArseny Solokha 				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
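		/* The DMA address skips RXBUF_ALIGNMENT bytes into the
		 * buffer, presumably leaving headroom for skb construction.
		 */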
12947d993c5fSArseny Solokha 
12957d993c5fSArseny Solokha 		/* Update to the next pointer */
12967d993c5fSArseny Solokha 		bdp++;
12977d993c5fSArseny Solokha 		rxb++;
12987d993c5fSArseny Solokha 
12997d993c5fSArseny Solokha 		if (unlikely(++i == rx_queue->rx_ring_size)) {
13007d993c5fSArseny Solokha 			i = 0;
13017d993c5fSArseny Solokha 			bdp = rx_queue->rx_bd_base;
13027d993c5fSArseny Solokha 			rxb = rx_queue->rx_buff;
13037d993c5fSArseny Solokha 		}
13047d993c5fSArseny Solokha 	}
13057d993c5fSArseny Solokha 
13067d993c5fSArseny Solokha 	rx_queue->next_to_use = i;
13077d993c5fSArseny Solokha 	rx_queue->next_to_alloc = i;
13087d993c5fSArseny Solokha }
13097d993c5fSArseny Solokha 
13107d993c5fSArseny Solokha static void gfar_init_bds(struct net_device *ndev)
131180ec396cSClaudiu Manoil {
13127d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(ndev);
13137d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
13147d993c5fSArseny Solokha 	struct gfar_priv_tx_q *tx_queue = NULL;
13157d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
13167d993c5fSArseny Solokha 	struct txbd8 *txbdp;
13177d993c5fSArseny Solokha 	u32 __iomem *rfbptr;
13187d993c5fSArseny Solokha 	int i, j;
131980ec396cSClaudiu Manoil 
13207d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
13217d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
13227d993c5fSArseny Solokha 		/* Initialize some variables in our dev structure */
13237d993c5fSArseny Solokha 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
13247d993c5fSArseny Solokha 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
13257d993c5fSArseny Solokha 		tx_queue->cur_tx = tx_queue->tx_bd_base;
13267d993c5fSArseny Solokha 		tx_queue->skb_curtx = 0;
13277d993c5fSArseny Solokha 		tx_queue->skb_dirtytx = 0;
13287d993c5fSArseny Solokha 
13297d993c5fSArseny Solokha 		/* Initialize Transmit Descriptor Ring */
13307d993c5fSArseny Solokha 		txbdp = tx_queue->tx_bd_base;
13317d993c5fSArseny Solokha 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
13327d993c5fSArseny Solokha 			txbdp->lstatus = 0;
13337d993c5fSArseny Solokha 			txbdp->bufPtr = 0;
13347d993c5fSArseny Solokha 			txbdp++;
13357d993c5fSArseny Solokha 		}
13367d993c5fSArseny Solokha 
13377d993c5fSArseny Solokha 		/* Set the last descriptor in the ring to indicate wrap */
13387d993c5fSArseny Solokha 		txbdp--;
13397d993c5fSArseny Solokha 		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
13407d993c5fSArseny Solokha 					    TXBD_WRAP);
13417d993c5fSArseny Solokha 	}
13427d993c5fSArseny Solokha 
13437d993c5fSArseny Solokha 	rfbptr = &regs->rfbptr0;
13447d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
13457d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
13467d993c5fSArseny Solokha 
13477d993c5fSArseny Solokha 		rx_queue->next_to_clean = 0;
13487d993c5fSArseny Solokha 		rx_queue->next_to_use = 0;
13497d993c5fSArseny Solokha 		rx_queue->next_to_alloc = 0;
13507d993c5fSArseny Solokha 
13517d993c5fSArseny Solokha 		/* make sure next_to_clean != next_to_use after this
13527d993c5fSArseny Solokha 		 * by leaving at least 1 unused descriptor
13537d993c5fSArseny Solokha 		 */
13547d993c5fSArseny Solokha 		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
13557d993c5fSArseny Solokha 
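		/* Each queue's rfbptr register sits 8 bytes (two u32s)
		 * after the previous one, hence the += 2 below.
		 */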
13567d993c5fSArseny Solokha 		rx_queue->rfbptr = rfbptr;
13577d993c5fSArseny Solokha 		rfbptr += 2;
135880ec396cSClaudiu Manoil 	}
135980ec396cSClaudiu Manoil }
136080ec396cSClaudiu Manoil 
13617d993c5fSArseny Solokha static int gfar_alloc_skb_resources(struct net_device *ndev)
13627d993c5fSArseny Solokha {
13637d993c5fSArseny Solokha 	void *vaddr;
13647d993c5fSArseny Solokha 	dma_addr_t addr;
13657d993c5fSArseny Solokha 	int i, j;
13667d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(ndev);
13677d993c5fSArseny Solokha 	struct device *dev = priv->dev;
13687d993c5fSArseny Solokha 	struct gfar_priv_tx_q *tx_queue = NULL;
13697d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
13707d993c5fSArseny Solokha 
13717d993c5fSArseny Solokha 	priv->total_tx_ring_size = 0;
13727d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++)
13737d993c5fSArseny Solokha 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
13747d993c5fSArseny Solokha 
13757d993c5fSArseny Solokha 	priv->total_rx_ring_size = 0;
13767d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++)
13777d993c5fSArseny Solokha 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
13787d993c5fSArseny Solokha 
13797d993c5fSArseny Solokha 	/* Allocate memory for the buffer descriptors */
13807d993c5fSArseny Solokha 	vaddr = dma_alloc_coherent(dev,
13817d993c5fSArseny Solokha 				   (priv->total_tx_ring_size *
13827d993c5fSArseny Solokha 				    sizeof(struct txbd8)) +
13837d993c5fSArseny Solokha 				   (priv->total_rx_ring_size *
13847d993c5fSArseny Solokha 				    sizeof(struct rxbd8)),
13857d993c5fSArseny Solokha 				   &addr, GFP_KERNEL);
13867d993c5fSArseny Solokha 	if (!vaddr)
13877d993c5fSArseny Solokha 		return -ENOMEM;
13887d993c5fSArseny Solokha 
13897d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
13907d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
13917d993c5fSArseny Solokha 		tx_queue->tx_bd_base = vaddr;
13927d993c5fSArseny Solokha 		tx_queue->tx_bd_dma_base = addr;
13937d993c5fSArseny Solokha 		tx_queue->dev = ndev;
13947d993c5fSArseny Solokha 		/* enet DMA only understands physical addresses */
13957d993c5fSArseny Solokha 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
13967d993c5fSArseny Solokha 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
13977d993c5fSArseny Solokha 	}
13987d993c5fSArseny Solokha 
13997d993c5fSArseny Solokha 	/* Start the rx descriptor ring where the tx ring leaves off */
14007d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
14017d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
14027d993c5fSArseny Solokha 		rx_queue->rx_bd_base = vaddr;
14037d993c5fSArseny Solokha 		rx_queue->rx_bd_dma_base = addr;
14047d993c5fSArseny Solokha 		rx_queue->ndev = ndev;
14057d993c5fSArseny Solokha 		rx_queue->dev = dev;
14067d993c5fSArseny Solokha 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
14077d993c5fSArseny Solokha 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
14087d993c5fSArseny Solokha 	}
14097d993c5fSArseny Solokha 
14107d993c5fSArseny Solokha 	/* Setup the skbuff rings */
14117d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
14127d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
14137d993c5fSArseny Solokha 		tx_queue->tx_skbuff =
14147d993c5fSArseny Solokha 			kmalloc_array(tx_queue->tx_ring_size,
14157d993c5fSArseny Solokha 				      sizeof(*tx_queue->tx_skbuff),
14167d993c5fSArseny Solokha 				      GFP_KERNEL);
14177d993c5fSArseny Solokha 		if (!tx_queue->tx_skbuff)
14187d993c5fSArseny Solokha 			goto cleanup;
14197d993c5fSArseny Solokha 
14207d993c5fSArseny Solokha 		for (j = 0; j < tx_queue->tx_ring_size; j++)
14217d993c5fSArseny Solokha 			tx_queue->tx_skbuff[j] = NULL;
14227d993c5fSArseny Solokha 	}
14237d993c5fSArseny Solokha 
14247d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
14257d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
14267d993c5fSArseny Solokha 		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
14277d993c5fSArseny Solokha 					    sizeof(*rx_queue->rx_buff),
14287d993c5fSArseny Solokha 					    GFP_KERNEL);
14297d993c5fSArseny Solokha 		if (!rx_queue->rx_buff)
14307d993c5fSArseny Solokha 			goto cleanup;
14317d993c5fSArseny Solokha 	}
14327d993c5fSArseny Solokha 
14337d993c5fSArseny Solokha 	gfar_init_bds(ndev);
14347d993c5fSArseny Solokha 
143580ec396cSClaudiu Manoil 	return 0;
14367d993c5fSArseny Solokha 
14377d993c5fSArseny Solokha cleanup:
14387d993c5fSArseny Solokha 	free_skb_resources(priv);
14397d993c5fSArseny Solokha 	return -ENOMEM;
144080ec396cSClaudiu Manoil }
144180ec396cSClaudiu Manoil 
1442ec21e2ecSJeff Kirsher /* Bring the controller up and running */
1443ec21e2ecSJeff Kirsher int startup_gfar(struct net_device *ndev)
1444ec21e2ecSJeff Kirsher {
1445ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(ndev);
144680ec396cSClaudiu Manoil 	int err;
1447ec21e2ecSJeff Kirsher 
1448a328ac92SClaudiu Manoil 	gfar_mac_reset(priv);
1449ec21e2ecSJeff Kirsher 
1450ec21e2ecSJeff Kirsher 	err = gfar_alloc_skb_resources(ndev);
1451ec21e2ecSJeff Kirsher 	if (err)
1452ec21e2ecSJeff Kirsher 		return err;
1453ec21e2ecSJeff Kirsher 
1454a328ac92SClaudiu Manoil 	gfar_init_tx_rx_base(priv);
1455ec21e2ecSJeff Kirsher 
14564e857c58SPeter Zijlstra 	smp_mb__before_atomic();
14570851133bSClaudiu Manoil 	clear_bit(GFAR_DOWN, &priv->state);
14584e857c58SPeter Zijlstra 	smp_mb__after_atomic();
14590851133bSClaudiu Manoil 
14600851133bSClaudiu Manoil 	/* Start Rx/Tx DMA and enable the interrupts */
1461c10650b6SClaudiu Manoil 	gfar_start(priv);
1462ec21e2ecSJeff Kirsher 
14632a4eebf0SClaudiu Manoil 	/* force link state update after mac reset */
14642a4eebf0SClaudiu Manoil 	priv->oldlink = 0;
14652a4eebf0SClaudiu Manoil 	priv->oldspeed = 0;
14662a4eebf0SClaudiu Manoil 	priv->oldduplex = -1;
14672a4eebf0SClaudiu Manoil 
14684c4a6b0eSPhilippe Reynes 	phy_start(ndev->phydev);
1469ec21e2ecSJeff Kirsher 
14700851133bSClaudiu Manoil 	enable_napi(priv);
14710851133bSClaudiu Manoil 
14720851133bSClaudiu Manoil 	netif_tx_wake_all_queues(ndev);
14730851133bSClaudiu Manoil 
1474ec21e2ecSJeff Kirsher 	return 0;
1475ec21e2ecSJeff Kirsher }
1476ec21e2ecSJeff Kirsher 
14777d993c5fSArseny Solokha static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
14787d993c5fSArseny Solokha {
14797d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
14807d993c5fSArseny Solokha 	struct phy_device *phydev = ndev->phydev;
14817d993c5fSArseny Solokha 	u32 val = 0;
14827d993c5fSArseny Solokha 
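	/* IEEE 802.3 pause-based flow control applies to full duplex only */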
14837d993c5fSArseny Solokha 	if (!phydev->duplex)
14847d993c5fSArseny Solokha 		return val;
14857d993c5fSArseny Solokha 
14867d993c5fSArseny Solokha 	if (!priv->pause_aneg_en) {
14877d993c5fSArseny Solokha 		if (priv->tx_pause_en)
14887d993c5fSArseny Solokha 			val |= MACCFG1_TX_FLOW;
14897d993c5fSArseny Solokha 		if (priv->rx_pause_en)
14907d993c5fSArseny Solokha 			val |= MACCFG1_RX_FLOW;
14917d993c5fSArseny Solokha 	} else {
14927d993c5fSArseny Solokha 		u16 lcl_adv, rmt_adv;
14937d993c5fSArseny Solokha 		u8 flowctrl;
14947d993c5fSArseny Solokha 		/* get link partner capabilities */
14957d993c5fSArseny Solokha 		rmt_adv = 0;
14967d993c5fSArseny Solokha 		if (phydev->pause)
14977d993c5fSArseny Solokha 			rmt_adv = LPA_PAUSE_CAP;
14987d993c5fSArseny Solokha 		if (phydev->asym_pause)
14997d993c5fSArseny Solokha 			rmt_adv |= LPA_PAUSE_ASYM;
15007d993c5fSArseny Solokha 
15017d993c5fSArseny Solokha 		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
15027d993c5fSArseny Solokha 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
15037d993c5fSArseny Solokha 		if (flowctrl & FLOW_CTRL_TX)
15047d993c5fSArseny Solokha 			val |= MACCFG1_TX_FLOW;
15057d993c5fSArseny Solokha 		if (flowctrl & FLOW_CTRL_RX)
15067d993c5fSArseny Solokha 			val |= MACCFG1_RX_FLOW;
15077d993c5fSArseny Solokha 	}
15087d993c5fSArseny Solokha 
15097d993c5fSArseny Solokha 	return val;
15107d993c5fSArseny Solokha }
15117d993c5fSArseny Solokha 
15127d993c5fSArseny Solokha static noinline void gfar_update_link_state(struct gfar_private *priv)
15137d993c5fSArseny Solokha {
15147d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
15157d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
15167d993c5fSArseny Solokha 	struct phy_device *phydev = ndev->phydev;
15177d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
15187d993c5fSArseny Solokha 	int i;
15197d993c5fSArseny Solokha 
15207d993c5fSArseny Solokha 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
15217d993c5fSArseny Solokha 		return;
15227d993c5fSArseny Solokha 
15237d993c5fSArseny Solokha 	if (phydev->link) {
15247d993c5fSArseny Solokha 		u32 tempval1 = gfar_read(&regs->maccfg1);
15257d993c5fSArseny Solokha 		u32 tempval = gfar_read(&regs->maccfg2);
15267d993c5fSArseny Solokha 		u32 ecntrl = gfar_read(&regs->ecntrl);
15277d993c5fSArseny Solokha 		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
15287d993c5fSArseny Solokha 
15297d993c5fSArseny Solokha 		if (phydev->duplex != priv->oldduplex) {
15307d993c5fSArseny Solokha 			if (!(phydev->duplex))
15317d993c5fSArseny Solokha 				tempval &= ~(MACCFG2_FULL_DUPLEX);
15327d993c5fSArseny Solokha 			else
15337d993c5fSArseny Solokha 				tempval |= MACCFG2_FULL_DUPLEX;
15347d993c5fSArseny Solokha 
15357d993c5fSArseny Solokha 			priv->oldduplex = phydev->duplex;
15367d993c5fSArseny Solokha 		}
15377d993c5fSArseny Solokha 
15387d993c5fSArseny Solokha 		if (phydev->speed != priv->oldspeed) {
15397d993c5fSArseny Solokha 			switch (phydev->speed) {
15407d993c5fSArseny Solokha 			case 1000:
15417d993c5fSArseny Solokha 				tempval =
15427d993c5fSArseny Solokha 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
15437d993c5fSArseny Solokha 
15447d993c5fSArseny Solokha 				ecntrl &= ~(ECNTRL_R100);
15457d993c5fSArseny Solokha 				break;
15467d993c5fSArseny Solokha 			case 100:
15477d993c5fSArseny Solokha 			case 10:
15487d993c5fSArseny Solokha 				tempval =
15497d993c5fSArseny Solokha 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
15507d993c5fSArseny Solokha 
15517d993c5fSArseny Solokha 				/* Reduced mode distinguishes
15527d993c5fSArseny Solokha 				 * between 10 and 100
15530977f817SJan Ceuleers 				 */
15547d993c5fSArseny Solokha 				if (phydev->speed == SPEED_100)
15557d993c5fSArseny Solokha 					ecntrl |= ECNTRL_R100;
15567d993c5fSArseny Solokha 				else
15577d993c5fSArseny Solokha 					ecntrl &= ~(ECNTRL_R100);
15587d993c5fSArseny Solokha 				break;
15597d993c5fSArseny Solokha 			default:
15607d993c5fSArseny Solokha 				netif_warn(priv, link, priv->ndev,
15617d993c5fSArseny Solokha 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
15627d993c5fSArseny Solokha 					   phydev->speed);
15637d993c5fSArseny Solokha 				break;
15647d993c5fSArseny Solokha 			}
15657d993c5fSArseny Solokha 
15667d993c5fSArseny Solokha 			priv->oldspeed = phydev->speed;
15677d993c5fSArseny Solokha 		}
15687d993c5fSArseny Solokha 
15697d993c5fSArseny Solokha 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
15707d993c5fSArseny Solokha 		tempval1 |= gfar_get_flowctrl_cfg(priv);
15717d993c5fSArseny Solokha 
15727d993c5fSArseny Solokha 		/* Turn last free buffer recording on */
15737d993c5fSArseny Solokha 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
15747d993c5fSArseny Solokha 			for (i = 0; i < priv->num_rx_queues; i++) {
15757d993c5fSArseny Solokha 				u32 bdp_dma;
15767d993c5fSArseny Solokha 
15777d993c5fSArseny Solokha 				rx_queue = priv->rx_queue[i];
15787d993c5fSArseny Solokha 				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
15797d993c5fSArseny Solokha 				gfar_write(rx_queue->rfbptr, bdp_dma);
15807d993c5fSArseny Solokha 			}
15817d993c5fSArseny Solokha 
15827d993c5fSArseny Solokha 			priv->tx_actual_en = 1;
15837d993c5fSArseny Solokha 		}
15847d993c5fSArseny Solokha 
15857d993c5fSArseny Solokha 		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
15867d993c5fSArseny Solokha 			priv->tx_actual_en = 0;
15877d993c5fSArseny Solokha 
15887d993c5fSArseny Solokha 		gfar_write(&regs->maccfg1, tempval1);
15897d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
15907d993c5fSArseny Solokha 		gfar_write(&regs->ecntrl, ecntrl);
15917d993c5fSArseny Solokha 
15927d993c5fSArseny Solokha 		if (!priv->oldlink)
15937d993c5fSArseny Solokha 			priv->oldlink = 1;
15947d993c5fSArseny Solokha 
15957d993c5fSArseny Solokha 	} else if (priv->oldlink) {
15967d993c5fSArseny Solokha 		priv->oldlink = 0;
15977d993c5fSArseny Solokha 		priv->oldspeed = 0;
15987d993c5fSArseny Solokha 		priv->oldduplex = -1;
15997d993c5fSArseny Solokha 	}
16007d993c5fSArseny Solokha 
16017d993c5fSArseny Solokha 	if (netif_msg_link(priv))
16027d993c5fSArseny Solokha 		phy_print_status(phydev);
16037d993c5fSArseny Solokha }
16047d993c5fSArseny Solokha 
16057d993c5fSArseny Solokha /* Called every time the controller might need to be made
16067d993c5fSArseny Solokha  * aware of new link state.  The PHY code conveys this
16077d993c5fSArseny Solokha  * information through variables in the phydev structure, and this
16087d993c5fSArseny Solokha  * function converts those variables into the appropriate
16097d993c5fSArseny Solokha  * register values, and can bring down the device if needed.
16107d993c5fSArseny Solokha  */
16117d993c5fSArseny Solokha static void adjust_link(struct net_device *dev)
1612ec21e2ecSJeff Kirsher {
1613ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
16147d993c5fSArseny Solokha 	struct phy_device *phydev = dev->phydev;
1615ec21e2ecSJeff Kirsher 
16167d993c5fSArseny Solokha 	if (unlikely(phydev->link != priv->oldlink ||
16177d993c5fSArseny Solokha 		     (phydev->link && (phydev->duplex != priv->oldduplex ||
16187d993c5fSArseny Solokha 				       phydev->speed != priv->oldspeed))))
16197d993c5fSArseny Solokha 		gfar_update_link_state(priv);
16207d993c5fSArseny Solokha }
1621ec21e2ecSJeff Kirsher 
16227d993c5fSArseny Solokha /* Initialize TBI PHY interface for communicating with the
16237d993c5fSArseny Solokha  * SERDES lynx PHY on the chip.  We communicate with this PHY
16247d993c5fSArseny Solokha  * through the MDIO bus on each controller, treating it as a
16257d993c5fSArseny Solokha  * "normal" PHY at the address found in the TBIPA register.  We assume
16267d993c5fSArseny Solokha  * that the TBIPA register is valid.  Either the MDIO bus code will set
16277d993c5fSArseny Solokha  * it to a value that doesn't conflict with other PHYs on the bus, or the
16287d993c5fSArseny Solokha  * value doesn't matter, as there are no other PHYs on the bus.
16297d993c5fSArseny Solokha  */
16307d993c5fSArseny Solokha static void gfar_configure_serdes(struct net_device *dev)
16317d993c5fSArseny Solokha {
16327d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
16337d993c5fSArseny Solokha 	struct phy_device *tbiphy;
163480ec396cSClaudiu Manoil 
16357d993c5fSArseny Solokha 	if (!priv->tbi_node) {
16367d993c5fSArseny Solokha 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
16377d993c5fSArseny Solokha 				    "device tree specify a tbi-handle\n");
16387d993c5fSArseny Solokha 		return;
16397d993c5fSArseny Solokha 	}
1640ec21e2ecSJeff Kirsher 
16417d993c5fSArseny Solokha 	tbiphy = of_phy_find_device(priv->tbi_node);
16427d993c5fSArseny Solokha 	if (!tbiphy) {
16437d993c5fSArseny Solokha 		dev_err(&dev->dev, "error: Could not get TBI device\n");
16447d993c5fSArseny Solokha 		return;
16457d993c5fSArseny Solokha 	}
16467d993c5fSArseny Solokha 
16477d993c5fSArseny Solokha 	/* If the link is already up, we must already be ok, and don't need to
16487d993c5fSArseny Solokha 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
16497d993c5fSArseny Solokha 	 * everything for us?  Resetting it takes the link down and requires
16507d993c5fSArseny Solokha 	 * several seconds for it to come back.
16517d993c5fSArseny Solokha 	 */
16527d993c5fSArseny Solokha 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
16537d993c5fSArseny Solokha 		put_device(&tbiphy->mdio.dev);
16547d993c5fSArseny Solokha 		return;
16557d993c5fSArseny Solokha 	}
16567d993c5fSArseny Solokha 
16577d993c5fSArseny Solokha 	/* Single clk mode, mii mode off (for serdes communication) */
16587d993c5fSArseny Solokha 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
16597d993c5fSArseny Solokha 
16607d993c5fSArseny Solokha 	phy_write(tbiphy, MII_ADVERTISE,
16617d993c5fSArseny Solokha 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
16627d993c5fSArseny Solokha 		  ADVERTISE_1000XPSE_ASYM);
16637d993c5fSArseny Solokha 
16647d993c5fSArseny Solokha 	phy_write(tbiphy, MII_BMCR,
16657d993c5fSArseny Solokha 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
16667d993c5fSArseny Solokha 		  BMCR_SPEED1000);
16677d993c5fSArseny Solokha 
16687d993c5fSArseny Solokha 	put_device(&tbiphy->mdio.dev);
16697d993c5fSArseny Solokha }
16707d993c5fSArseny Solokha 
16717d993c5fSArseny Solokha /* Initializes driver's PHY state, and attaches to the PHY.
16727d993c5fSArseny Solokha  * Returns 0 on success.
16737d993c5fSArseny Solokha  */
16747d993c5fSArseny Solokha static int init_phy(struct net_device *dev)
16757d993c5fSArseny Solokha {
16767d993c5fSArseny Solokha 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
16777d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
16788e578e73SArseny Solokha 	phy_interface_t interface = priv->interface;
16797d993c5fSArseny Solokha 	struct phy_device *phydev;
16807d993c5fSArseny Solokha 	struct ethtool_eee edata;
16817d993c5fSArseny Solokha 
16827d993c5fSArseny Solokha 	linkmode_set_bit_array(phy_10_100_features_array,
16837d993c5fSArseny Solokha 			       ARRAY_SIZE(phy_10_100_features_array),
16847d993c5fSArseny Solokha 			       mask);
16857d993c5fSArseny Solokha 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
16867d993c5fSArseny Solokha 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
16877d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
16887d993c5fSArseny Solokha 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
16897d993c5fSArseny Solokha 
16907d993c5fSArseny Solokha 	priv->oldlink = 0;
16917d993c5fSArseny Solokha 	priv->oldspeed = 0;
16927d993c5fSArseny Solokha 	priv->oldduplex = -1;
16937d993c5fSArseny Solokha 
16947d993c5fSArseny Solokha 	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
16957d993c5fSArseny Solokha 				interface);
16967d993c5fSArseny Solokha 	if (!phydev) {
16977d993c5fSArseny Solokha 		dev_err(&dev->dev, "could not attach to PHY\n");
16987d993c5fSArseny Solokha 		return -ENODEV;
16997d993c5fSArseny Solokha 	}
17007d993c5fSArseny Solokha 
17017d993c5fSArseny Solokha 	if (interface == PHY_INTERFACE_MODE_SGMII)
17027d993c5fSArseny Solokha 		gfar_configure_serdes(dev);
17037d993c5fSArseny Solokha 
17047d993c5fSArseny Solokha 	/* Remove any features not supported by the controller */
17057d993c5fSArseny Solokha 	linkmode_and(phydev->supported, phydev->supported, mask);
17067d993c5fSArseny Solokha 	linkmode_copy(phydev->advertising, phydev->supported);
17077d993c5fSArseny Solokha 
17087d993c5fSArseny Solokha 	/* Add support for flow control */
17097d993c5fSArseny Solokha 	phy_support_asym_pause(phydev);
17107d993c5fSArseny Solokha 
17117d993c5fSArseny Solokha 	/* disable EEE autoneg, EEE not supported by eTSEC */
17127d993c5fSArseny Solokha 	memset(&edata, 0, sizeof(struct ethtool_eee));
17137d993c5fSArseny Solokha 	phy_ethtool_set_eee(phydev, &edata);
17147d993c5fSArseny Solokha 
17157d993c5fSArseny Solokha 	return 0;
1716ec21e2ecSJeff Kirsher }
1717ec21e2ecSJeff Kirsher 
1718ec21e2ecSJeff Kirsher static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1719ec21e2ecSJeff Kirsher {
1720d58ff351SJohannes Berg 	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1721ec21e2ecSJeff Kirsher 
1722ec21e2ecSJeff Kirsher 	memset(fcb, 0, GMAC_FCB_LEN);
1723ec21e2ecSJeff Kirsher 
1724ec21e2ecSJeff Kirsher 	return fcb;
1725ec21e2ecSJeff Kirsher }
1726ec21e2ecSJeff Kirsher 
17279c4886e5SManfred Rudigier static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
17289c4886e5SManfred Rudigier 				    int fcb_length)
1729ec21e2ecSJeff Kirsher {
1730ec21e2ecSJeff Kirsher 	/* If we're here, it's an IP packet with a TCP or UDP
1731ec21e2ecSJeff Kirsher 	 * payload.  We set it to checksum, using a pseudo-header
1732ec21e2ecSJeff Kirsher 	 * we provide
1733ec21e2ecSJeff Kirsher 	 */
17343a2e16c8SJan Ceuleers 	u8 flags = TXFCB_DEFAULT;
1735ec21e2ecSJeff Kirsher 
17360977f817SJan Ceuleers 	/* Tell the controller what the protocol is,
17370977f817SJan Ceuleers 	 * and provide the already calculated phcs (pseudo-header checksum)
17380977f817SJan Ceuleers 	 */
1739ec21e2ecSJeff Kirsher 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1740ec21e2ecSJeff Kirsher 		flags |= TXFCB_UDP;
174126eb9374SClaudiu Manoil 		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1742ec21e2ecSJeff Kirsher 	} else
174326eb9374SClaudiu Manoil 		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1744ec21e2ecSJeff Kirsher 
1745ec21e2ecSJeff Kirsher 	/* l3os is the distance between the start of the
1746ec21e2ecSJeff Kirsher 	 * frame (skb->data) and the start of the IP hdr.
1747ec21e2ecSJeff Kirsher 	 * l4os is the distance between the start of the
17480977f817SJan Ceuleers 	 * l3 hdr and the l4 hdr
17490977f817SJan Ceuleers 	 */
175026eb9374SClaudiu Manoil 	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1751ec21e2ecSJeff Kirsher 	fcb->l4os = skb_network_header_len(skb);
1752ec21e2ecSJeff Kirsher 
1753ec21e2ecSJeff Kirsher 	fcb->flags = flags;
1754ec21e2ecSJeff Kirsher }
1755ec21e2ecSJeff Kirsher 
1756278af574SArnd Bergmann static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1757ec21e2ecSJeff Kirsher {
1758ec21e2ecSJeff Kirsher 	fcb->flags |= TXFCB_VLN;
175926eb9374SClaudiu Manoil 	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1760ec21e2ecSJeff Kirsher }
1761ec21e2ecSJeff Kirsher 
1762ec21e2ecSJeff Kirsher static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1763ec21e2ecSJeff Kirsher 				      struct txbd8 *base, int ring_size)
1764ec21e2ecSJeff Kirsher {
1765ec21e2ecSJeff Kirsher 	struct txbd8 *new_bd = bdp + stride;
1766ec21e2ecSJeff Kirsher 
1767ec21e2ecSJeff Kirsher 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1768ec21e2ecSJeff Kirsher }
1769ec21e2ecSJeff Kirsher 
1770ec21e2ecSJeff Kirsher static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1771ec21e2ecSJeff Kirsher 				      int ring_size)
1772ec21e2ecSJeff Kirsher {
1773ec21e2ecSJeff Kirsher 	return skip_txbd(bdp, 1, base, ring_size);
1774ec21e2ecSJeff Kirsher }
1775ec21e2ecSJeff Kirsher 
177602d88fb4SClaudiu Manoil /* eTSEC12: csum generation not supported for some fcb offsets */
177702d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_12(struct gfar_private *priv,
177802d88fb4SClaudiu Manoil 				       unsigned long fcb_addr)
177902d88fb4SClaudiu Manoil {
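	/* A remainder above 0x18 means the 8-byte FCB would start in the
	 * last 7 bytes of a 32-byte block and straddle the boundary.
	 */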
178002d88fb4SClaudiu Manoil 	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
178102d88fb4SClaudiu Manoil 	       (fcb_addr % 0x20) > 0x18);
178202d88fb4SClaudiu Manoil }
178302d88fb4SClaudiu Manoil 
178402d88fb4SClaudiu Manoil /* eTSEC76: csum generation for frames larger than 2500 bytes may
178502d88fb4SClaudiu Manoil  * cause excess delays before start of transmission
178602d88fb4SClaudiu Manoil  */
178702d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_76(struct gfar_private *priv,
178802d88fb4SClaudiu Manoil 				       unsigned int len)
178902d88fb4SClaudiu Manoil {
179002d88fb4SClaudiu Manoil 	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
179102d88fb4SClaudiu Manoil 	       (len > 2500));
179202d88fb4SClaudiu Manoil }
179302d88fb4SClaudiu Manoil 
17940977f817SJan Ceuleers /* This is called by the kernel when a frame is ready for transmission.
17950977f817SJan Ceuleers  * It is pointed to by the dev->hard_start_xmit function pointer
17960977f817SJan Ceuleers  */
179706983aa5SYueHaibing static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1798ec21e2ecSJeff Kirsher {
1799ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
1800ec21e2ecSJeff Kirsher 	struct gfar_priv_tx_q *tx_queue = NULL;
1801ec21e2ecSJeff Kirsher 	struct netdev_queue *txq;
1802ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = NULL;
1803ec21e2ecSJeff Kirsher 	struct txfcb *fcb = NULL;
1804ec21e2ecSJeff Kirsher 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1805ec21e2ecSJeff Kirsher 	u32 lstatus;
180642f397adSClaudiu Manoil 	skb_frag_t *frag;
18070d0cffdcSClaudiu Manoil 	int i, rq = 0;
18080d0cffdcSClaudiu Manoil 	int do_tstamp, do_csum, do_vlan;
1809ec21e2ecSJeff Kirsher 	u32 bufaddr;
181050ad076bSClaudiu Manoil 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1811ec21e2ecSJeff Kirsher 
1812ec21e2ecSJeff Kirsher 	rq = skb->queue_mapping;
1813ec21e2ecSJeff Kirsher 	tx_queue = priv->tx_queue[rq];
1814ec21e2ecSJeff Kirsher 	txq = netdev_get_tx_queue(dev, rq);
1815ec21e2ecSJeff Kirsher 	base = tx_queue->tx_bd_base;
1816ec21e2ecSJeff Kirsher 	regs = tx_queue->grp->regs;
1817ec21e2ecSJeff Kirsher 
18180d0cffdcSClaudiu Manoil 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1819df8a39deSJiri Pirko 	do_vlan = skb_vlan_tag_present(skb);
18200d0cffdcSClaudiu Manoil 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
18210d0cffdcSClaudiu Manoil 		    priv->hwts_tx_en;
18220d0cffdcSClaudiu Manoil 
18230d0cffdcSClaudiu Manoil 	if (do_csum || do_vlan)
18240d0cffdcSClaudiu Manoil 		fcb_len = GMAC_FCB_LEN;
18250d0cffdcSClaudiu Manoil 
1826ec21e2ecSJeff Kirsher 	/* check if time stamp should be generated */
18270d0cffdcSClaudiu Manoil 	if (unlikely(do_tstamp))
18280d0cffdcSClaudiu Manoil 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1829ec21e2ecSJeff Kirsher 
1830ec21e2ecSJeff Kirsher 	/* make space for additional header when fcb is needed */
1831d145c903SClaudiu Manoil 	if (fcb_len) {
1832d145c903SClaudiu Manoil 		if (unlikely(skb_cow_head(skb, fcb_len))) {
1833ec21e2ecSJeff Kirsher 			dev->stats.tx_errors++;
1834c9974ad4SEric W. Biederman 			dev_kfree_skb_any(skb);
1835ec21e2ecSJeff Kirsher 			return NETDEV_TX_OK;
1836ec21e2ecSJeff Kirsher 		}
1837ec21e2ecSJeff Kirsher 	}
1838ec21e2ecSJeff Kirsher 
1839ec21e2ecSJeff Kirsher 	/* total number of fragments in the SKB */
1840ec21e2ecSJeff Kirsher 	nr_frags = skb_shinfo(skb)->nr_frags;
1841ec21e2ecSJeff Kirsher 
1842ec21e2ecSJeff Kirsher 	/* calculate the required number of TxBDs for this skb */
1843ec21e2ecSJeff Kirsher 	if (unlikely(do_tstamp))
1844ec21e2ecSJeff Kirsher 		nr_txbds = nr_frags + 2;
1845ec21e2ecSJeff Kirsher 	else
1846ec21e2ecSJeff Kirsher 		nr_txbds = nr_frags + 1;
1847ec21e2ecSJeff Kirsher 
1848ec21e2ecSJeff Kirsher 	/* check if there is space to queue this packet */
1849ec21e2ecSJeff Kirsher 	if (nr_txbds > tx_queue->num_txbdfree) {
1850ec21e2ecSJeff Kirsher 		/* no space, stop the queue */
1851ec21e2ecSJeff Kirsher 		netif_tx_stop_queue(txq);
1852ec21e2ecSJeff Kirsher 		dev->stats.tx_fifo_errors++;
1853ec21e2ecSJeff Kirsher 		return NETDEV_TX_BUSY;
1854ec21e2ecSJeff Kirsher 	}
1855ec21e2ecSJeff Kirsher 
1856ec21e2ecSJeff Kirsher 	/* Update transmit stats */
185750ad076bSClaudiu Manoil 	bytes_sent = skb->len;
185850ad076bSClaudiu Manoil 	tx_queue->stats.tx_bytes += bytes_sent;
185950ad076bSClaudiu Manoil 	/* keep Tx bytes on wire for BQL accounting */
186050ad076bSClaudiu Manoil 	GFAR_CB(skb)->bytes_sent = bytes_sent;
1861ec21e2ecSJeff Kirsher 	tx_queue->stats.tx_packets++;
1862ec21e2ecSJeff Kirsher 
1863ec21e2ecSJeff Kirsher 	txbdp = txbdp_start = tx_queue->cur_tx;
1864a7312d58SClaudiu Manoil 	lstatus = be32_to_cpu(txbdp->lstatus);
1865ec21e2ecSJeff Kirsher 
18669c4886e5SManfred Rudigier 	/* Add TxPAL between FCB and frame if required */
18679c4886e5SManfred Rudigier 	if (unlikely(do_tstamp)) {
18689c4886e5SManfred Rudigier 		skb_push(skb, GMAC_TXPAL_LEN);
18699c4886e5SManfred Rudigier 		memset(skb->data, 0, GMAC_TXPAL_LEN);
18709c4886e5SManfred Rudigier 	}
18719c4886e5SManfred Rudigier 
18720d0cffdcSClaudiu Manoil 	/* Add TxFCB if required */
18730d0cffdcSClaudiu Manoil 	if (fcb_len) {
1874ec21e2ecSJeff Kirsher 		fcb = gfar_add_fcb(skb);
1875ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_TOE);
18760d0cffdcSClaudiu Manoil 	}
18770d0cffdcSClaudiu Manoil 
18780d0cffdcSClaudiu Manoil 	/* Set up checksumming */
18790d0cffdcSClaudiu Manoil 	if (do_csum) {
18800d0cffdcSClaudiu Manoil 		gfar_tx_checksum(skb, fcb, fcb_len);
188102d88fb4SClaudiu Manoil 
188202d88fb4SClaudiu Manoil 		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
188302d88fb4SClaudiu Manoil 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
188402d88fb4SClaudiu Manoil 			__skb_pull(skb, GMAC_FCB_LEN);
188502d88fb4SClaudiu Manoil 			skb_checksum_help(skb);
18860d0cffdcSClaudiu Manoil 			if (do_vlan || do_tstamp) {
18870d0cffdcSClaudiu Manoil 				/* put back a new fcb for vlan/tstamp TOE */
18880d0cffdcSClaudiu Manoil 				fcb = gfar_add_fcb(skb);
18890d0cffdcSClaudiu Manoil 			} else {
18900d0cffdcSClaudiu Manoil 				/* Tx TOE not used */
189102d88fb4SClaudiu Manoil 				lstatus &= ~(BD_LFLAG(TXBD_TOE));
189202d88fb4SClaudiu Manoil 				fcb = NULL;
1893ec21e2ecSJeff Kirsher 			}
1894ec21e2ecSJeff Kirsher 		}
1895ec21e2ecSJeff Kirsher 	}
1896ec21e2ecSJeff Kirsher 
18970d0cffdcSClaudiu Manoil 	if (do_vlan)
1898ec21e2ecSJeff Kirsher 		gfar_tx_vlan(skb, fcb);
1899ec21e2ecSJeff Kirsher 
19000a4b5a24SKevin Hao 	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
19010a4b5a24SKevin Hao 				 DMA_TO_DEVICE);
19020a4b5a24SKevin Hao 	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
19030a4b5a24SKevin Hao 		goto dma_map_err;
19040a4b5a24SKevin Hao 
1905a7312d58SClaudiu Manoil 	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1906ec21e2ecSJeff Kirsher 
1907e19d0839SClaudiu Manoil 	/* Time stamp insertion requires one additional TxBD */
1908e19d0839SClaudiu Manoil 	if (unlikely(do_tstamp))
1909e19d0839SClaudiu Manoil 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1910e19d0839SClaudiu Manoil 						 tx_queue->tx_ring_size);
1911e19d0839SClaudiu Manoil 
191248963b44SClaudiu Manoil 	if (likely(!nr_frags)) {
19139c8b0778SYangbo Lu 		if (likely(!do_tstamp))
1914e19d0839SClaudiu Manoil 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1915e19d0839SClaudiu Manoil 	} else {
1916e19d0839SClaudiu Manoil 		u32 lstatus_start = lstatus;
1917e19d0839SClaudiu Manoil 
1918e19d0839SClaudiu Manoil 		/* Place the fragment addresses and lengths into the TxBDs */
191942f397adSClaudiu Manoil 		frag = &skb_shinfo(skb)->frags[0];
192042f397adSClaudiu Manoil 		for (i = 0; i < nr_frags; i++, frag++) {
192142f397adSClaudiu Manoil 			unsigned int size;
192242f397adSClaudiu Manoil 
1923e19d0839SClaudiu Manoil 			/* Point at the next BD, wrapping as needed */
1924e19d0839SClaudiu Manoil 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1925e19d0839SClaudiu Manoil 
192642f397adSClaudiu Manoil 			size = skb_frag_size(frag);
1927e19d0839SClaudiu Manoil 
192842f397adSClaudiu Manoil 			lstatus = be32_to_cpu(txbdp->lstatus) | size |
1929e19d0839SClaudiu Manoil 				  BD_LFLAG(TXBD_READY);
1930e19d0839SClaudiu Manoil 
1931e19d0839SClaudiu Manoil 			/* Handle the last BD specially */
1932e19d0839SClaudiu Manoil 			if (i == nr_frags - 1)
1933e19d0839SClaudiu Manoil 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1934e19d0839SClaudiu Manoil 
193542f397adSClaudiu Manoil 			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
193642f397adSClaudiu Manoil 						   size, DMA_TO_DEVICE);
1937e19d0839SClaudiu Manoil 			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1938e19d0839SClaudiu Manoil 				goto dma_map_err;
1939e19d0839SClaudiu Manoil 
1940e19d0839SClaudiu Manoil 			/* set the TxBD length and buffer pointer */
1941e19d0839SClaudiu Manoil 			txbdp->bufPtr = cpu_to_be32(bufaddr);
1942e19d0839SClaudiu Manoil 			txbdp->lstatus = cpu_to_be32(lstatus);
1943e19d0839SClaudiu Manoil 		}
1944e19d0839SClaudiu Manoil 
1945e19d0839SClaudiu Manoil 		lstatus = lstatus_start;
1946e19d0839SClaudiu Manoil 	}
1947e19d0839SClaudiu Manoil 
19480977f817SJan Ceuleers 	/* If time stamping is requested, one additional TxBD must be set up. The
1949ec21e2ecSJeff Kirsher 	 * first TxBD points to the FCB and must have a data length of
1950ec21e2ecSJeff Kirsher 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1951ec21e2ecSJeff Kirsher 	 * the full frame length.
1952ec21e2ecSJeff Kirsher 	 */
1953ec21e2ecSJeff Kirsher 	if (unlikely(do_tstamp)) {
1954a7312d58SClaudiu Manoil 		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1955a7312d58SClaudiu Manoil 
1956a7312d58SClaudiu Manoil 		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1957a7312d58SClaudiu Manoil 		bufaddr += fcb_len;
195848963b44SClaudiu Manoil 
1959a7312d58SClaudiu Manoil 		lstatus_ts |= BD_LFLAG(TXBD_READY) |
19600d0cffdcSClaudiu Manoil 			      (skb_headlen(skb) - fcb_len);
196148963b44SClaudiu Manoil 		if (!nr_frags)
196248963b44SClaudiu Manoil 			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1963a7312d58SClaudiu Manoil 
1964a7312d58SClaudiu Manoil 		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1965a7312d58SClaudiu Manoil 		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1966ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1967e19d0839SClaudiu Manoil 
1968e19d0839SClaudiu Manoil 		/* Setup tx hardware time stamping */
1969e19d0839SClaudiu Manoil 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1970e19d0839SClaudiu Manoil 		fcb->ptp = 1;
1971ec21e2ecSJeff Kirsher 	} else {
1972ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1973ec21e2ecSJeff Kirsher 	}
1974ec21e2ecSJeff Kirsher 
197550ad076bSClaudiu Manoil 	netdev_tx_sent_queue(txq, bytes_sent);
1976d8a0f1b0SPaul Gortmaker 
1977d55398baSClaudiu Manoil 	gfar_wmb();
1978ec21e2ecSJeff Kirsher 
1979a7312d58SClaudiu Manoil 	txbdp_start->lstatus = cpu_to_be32(lstatus);
1980ec21e2ecSJeff Kirsher 
1981d55398baSClaudiu Manoil 	gfar_wmb(); /* force lstatus write before tx_skbuff */
1982ec21e2ecSJeff Kirsher 
1983ec21e2ecSJeff Kirsher 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1984ec21e2ecSJeff Kirsher 
1985ec21e2ecSJeff Kirsher 	/* Update the current skb pointer to the next entry we will use
19860977f817SJan Ceuleers 	 * (wrapping if necessary)
19870977f817SJan Ceuleers 	 */
1988ec21e2ecSJeff Kirsher 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1989ec21e2ecSJeff Kirsher 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1990ec21e2ecSJeff Kirsher 
1991ec21e2ecSJeff Kirsher 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1992ec21e2ecSJeff Kirsher 
1993bc602280SClaudiu Manoil 	/* We can work in parallel with gfar_clean_tx_ring(), except
1994bc602280SClaudiu Manoil 	 * when modifying num_txbdfree. Note that we didn't grab the lock
1995bc602280SClaudiu Manoil 	 * when we were reading num_txbdfree and checking for available
1996bc602280SClaudiu Manoil 	 * space; that's because outside of this function it can only grow.
1997bc602280SClaudiu Manoil 	 */
1998bc602280SClaudiu Manoil 	spin_lock_bh(&tx_queue->txlock);
1999ec21e2ecSJeff Kirsher 	/* reduce TxBD free count */
2000ec21e2ecSJeff Kirsher 	tx_queue->num_txbdfree -= (nr_txbds);
2001bc602280SClaudiu Manoil 	spin_unlock_bh(&tx_queue->txlock);
2002ec21e2ecSJeff Kirsher 
2003ec21e2ecSJeff Kirsher 	/* If the next BD still needs to be cleaned up, then the bds
20040977f817SJan Ceuleers 	 * are full.  We need to tell the kernel to stop sending us stuff.
20050977f817SJan Ceuleers 	 */
2006ec21e2ecSJeff Kirsher 	if (!tx_queue->num_txbdfree) {
2007ec21e2ecSJeff Kirsher 		netif_tx_stop_queue(txq);
2008ec21e2ecSJeff Kirsher 
2009ec21e2ecSJeff Kirsher 		dev->stats.tx_fifo_errors++;
2010ec21e2ecSJeff Kirsher 	}
2011ec21e2ecSJeff Kirsher 
2012ec21e2ecSJeff Kirsher 	/* Tell the DMA to go go go */
2013ec21e2ecSJeff Kirsher 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2014ec21e2ecSJeff Kirsher 
2015ec21e2ecSJeff Kirsher 	return NETDEV_TX_OK;
20160a4b5a24SKevin Hao 
20170a4b5a24SKevin Hao dma_map_err:
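	/* Unwind: step past the start (and timestamp) BD, then unmap every
	 * frag BD that had already been marked READY.
	 */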
20180a4b5a24SKevin Hao 	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
20190a4b5a24SKevin Hao 	if (do_tstamp)
20200a4b5a24SKevin Hao 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
20210a4b5a24SKevin Hao 	for (i = 0; i < nr_frags; i++) {
2022a7312d58SClaudiu Manoil 		lstatus = be32_to_cpu(txbdp->lstatus);
20230a4b5a24SKevin Hao 		if (!(lstatus & BD_LFLAG(TXBD_READY)))
20240a4b5a24SKevin Hao 			break;
20250a4b5a24SKevin Hao 
2026a7312d58SClaudiu Manoil 		lstatus &= ~BD_LFLAG(TXBD_READY);
2027a7312d58SClaudiu Manoil 		txbdp->lstatus = cpu_to_be32(lstatus);
2028a7312d58SClaudiu Manoil 		bufaddr = be32_to_cpu(txbdp->bufPtr);
2029a7312d58SClaudiu Manoil 		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
20300a4b5a24SKevin Hao 			       DMA_TO_DEVICE);
20310a4b5a24SKevin Hao 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
20320a4b5a24SKevin Hao 	}
20330a4b5a24SKevin Hao 	gfar_wmb();
20340a4b5a24SKevin Hao 	dev_kfree_skb_any(skb);
20350a4b5a24SKevin Hao 	return NETDEV_TX_OK;
2036ec21e2ecSJeff Kirsher }
2037ec21e2ecSJeff Kirsher 
2038ec21e2ecSJeff Kirsher /* Changes the mac address if the controller is not running. */
2039ec21e2ecSJeff Kirsher static int gfar_set_mac_address(struct net_device *dev)
2040ec21e2ecSJeff Kirsher {
2041ec21e2ecSJeff Kirsher 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2042ec21e2ecSJeff Kirsher 
2043ec21e2ecSJeff Kirsher 	return 0;
2044ec21e2ecSJeff Kirsher }
2045ec21e2ecSJeff Kirsher 
2046ec21e2ecSJeff Kirsher static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2047ec21e2ecSJeff Kirsher {
2048ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2049ec21e2ecSJeff Kirsher 
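	/* Spin until we own the RESETTING bit, serializing this reconfig
	 * against other reset/reconfiguration paths.
	 */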
20500851133bSClaudiu Manoil 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20510851133bSClaudiu Manoil 		cpu_relax();
20520851133bSClaudiu Manoil 
205388302648SClaudiu Manoil 	if (dev->flags & IFF_UP)
2054ec21e2ecSJeff Kirsher 		stop_gfar(dev);
2055ec21e2ecSJeff Kirsher 
2056ec21e2ecSJeff Kirsher 	dev->mtu = new_mtu;
2057ec21e2ecSJeff Kirsher 
205888302648SClaudiu Manoil 	if (dev->flags & IFF_UP)
2059ec21e2ecSJeff Kirsher 		startup_gfar(dev);
2060ec21e2ecSJeff Kirsher 
20610851133bSClaudiu Manoil 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
20620851133bSClaudiu Manoil 
2063ec21e2ecSJeff Kirsher 	return 0;
2064ec21e2ecSJeff Kirsher }
2065ec21e2ecSJeff Kirsher 
20669f5c44cfSYueHaibing static void reset_gfar(struct net_device *ndev)
20670851133bSClaudiu Manoil {
20680851133bSClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
20690851133bSClaudiu Manoil 
20700851133bSClaudiu Manoil 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20710851133bSClaudiu Manoil 		cpu_relax();
20720851133bSClaudiu Manoil 
20730851133bSClaudiu Manoil 	stop_gfar(ndev);
20740851133bSClaudiu Manoil 	startup_gfar(ndev);
20750851133bSClaudiu Manoil 
20760851133bSClaudiu Manoil 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
20770851133bSClaudiu Manoil }
20780851133bSClaudiu Manoil 
2079ec21e2ecSJeff Kirsher /* gfar_reset_task gets scheduled when a packet has not been
2080ec21e2ecSJeff Kirsher  * transmitted after a set amount of time.
2081ec21e2ecSJeff Kirsher  * For now, assume that clearing out all the structures, and
2082ec21e2ecSJeff Kirsher  * starting over will fix the problem.
2083ec21e2ecSJeff Kirsher  */
2084ec21e2ecSJeff Kirsher static void gfar_reset_task(struct work_struct *work)
2085ec21e2ecSJeff Kirsher {
2086ec21e2ecSJeff Kirsher 	struct gfar_private *priv = container_of(work, struct gfar_private,
2087ec21e2ecSJeff Kirsher 						 reset_task);
20880851133bSClaudiu Manoil 	reset_gfar(priv->ndev);
2089ec21e2ecSJeff Kirsher }
2090ec21e2ecSJeff Kirsher 
20910290bd29SMichael S. Tsirkin static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2092ec21e2ecSJeff Kirsher {
2093ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2094ec21e2ecSJeff Kirsher 
2095ec21e2ecSJeff Kirsher 	dev->stats.tx_errors++;
2096ec21e2ecSJeff Kirsher 	schedule_work(&priv->reset_task);
2097ec21e2ecSJeff Kirsher }
2098ec21e2ecSJeff Kirsher 
20997d993c5fSArseny Solokha static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
21007d993c5fSArseny Solokha {
21017d993c5fSArseny Solokha 	struct hwtstamp_config config;
21027d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(netdev);
21037d993c5fSArseny Solokha 
21047d993c5fSArseny Solokha 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
21057d993c5fSArseny Solokha 		return -EFAULT;
21067d993c5fSArseny Solokha 
21077d993c5fSArseny Solokha 	/* reserved for future extensions */
21087d993c5fSArseny Solokha 	if (config.flags)
21097d993c5fSArseny Solokha 		return -EINVAL;
21107d993c5fSArseny Solokha 
21117d993c5fSArseny Solokha 	switch (config.tx_type) {
21127d993c5fSArseny Solokha 	case HWTSTAMP_TX_OFF:
21137d993c5fSArseny Solokha 		priv->hwts_tx_en = 0;
21147d993c5fSArseny Solokha 		break;
21157d993c5fSArseny Solokha 	case HWTSTAMP_TX_ON:
21167d993c5fSArseny Solokha 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
21177d993c5fSArseny Solokha 			return -ERANGE;
21187d993c5fSArseny Solokha 		priv->hwts_tx_en = 1;
21197d993c5fSArseny Solokha 		break;
21207d993c5fSArseny Solokha 	default:
21217d993c5fSArseny Solokha 		return -ERANGE;
21227d993c5fSArseny Solokha 	}
21237d993c5fSArseny Solokha 
21247d993c5fSArseny Solokha 	switch (config.rx_filter) {
21257d993c5fSArseny Solokha 	case HWTSTAMP_FILTER_NONE:
21267d993c5fSArseny Solokha 		if (priv->hwts_rx_en) {
21277d993c5fSArseny Solokha 			priv->hwts_rx_en = 0;
21287d993c5fSArseny Solokha 			reset_gfar(netdev);
21297d993c5fSArseny Solokha 		}
21307d993c5fSArseny Solokha 		break;
21317d993c5fSArseny Solokha 	default:
21327d993c5fSArseny Solokha 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
21337d993c5fSArseny Solokha 			return -ERANGE;
21347d993c5fSArseny Solokha 		if (!priv->hwts_rx_en) {
21357d993c5fSArseny Solokha 			priv->hwts_rx_en = 1;
21367d993c5fSArseny Solokha 			reset_gfar(netdev);
21377d993c5fSArseny Solokha 		}
21387d993c5fSArseny Solokha 		config.rx_filter = HWTSTAMP_FILTER_ALL;
21397d993c5fSArseny Solokha 		break;
21407d993c5fSArseny Solokha 	}
21417d993c5fSArseny Solokha 
21427d993c5fSArseny Solokha 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
21437d993c5fSArseny Solokha 		-EFAULT : 0;
21447d993c5fSArseny Solokha }
21457d993c5fSArseny Solokha 
21467d993c5fSArseny Solokha static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
21477d993c5fSArseny Solokha {
21487d993c5fSArseny Solokha 	struct hwtstamp_config config;
21497d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(netdev);
21507d993c5fSArseny Solokha 
21517d993c5fSArseny Solokha 	config.flags = 0;
21527d993c5fSArseny Solokha 	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
21537d993c5fSArseny Solokha 	config.rx_filter = (priv->hwts_rx_en ?
21547d993c5fSArseny Solokha 			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
21557d993c5fSArseny Solokha 
21567d993c5fSArseny Solokha 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
21577d993c5fSArseny Solokha 		-EFAULT : 0;
21587d993c5fSArseny Solokha }
21597d993c5fSArseny Solokha 
21607d993c5fSArseny Solokha static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
21617d993c5fSArseny Solokha {
21627d993c5fSArseny Solokha 	struct phy_device *phydev = dev->phydev;
21637d993c5fSArseny Solokha 
21647d993c5fSArseny Solokha 	if (!netif_running(dev))
21657d993c5fSArseny Solokha 		return -EINVAL;
21667d993c5fSArseny Solokha 
21677d993c5fSArseny Solokha 	if (cmd == SIOCSHWTSTAMP)
21687d993c5fSArseny Solokha 		return gfar_hwtstamp_set(dev, rq);
21697d993c5fSArseny Solokha 	if (cmd == SIOCGHWTSTAMP)
21707d993c5fSArseny Solokha 		return gfar_hwtstamp_get(dev, rq);
21717d993c5fSArseny Solokha 
21727d993c5fSArseny Solokha 	if (!phydev)
21737d993c5fSArseny Solokha 		return -ENODEV;
21747d993c5fSArseny Solokha 
21757d993c5fSArseny Solokha 	return phy_mii_ioctl(phydev, rq, cmd);
21767d993c5fSArseny Solokha }
21777d993c5fSArseny Solokha 
2178ec21e2ecSJeff Kirsher /* Tx completion: reclaim finished Tx descriptors and free their skbs
2178ec21e2ecSJeff Kirsher  * (runs from NAPI poll context, see gfar_poll_tx*())
2178ec21e2ecSJeff Kirsher  */
2179c233cf40SClaudiu Manoil static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2180ec21e2ecSJeff Kirsher {
2181ec21e2ecSJeff Kirsher 	struct net_device *dev = tx_queue->dev;
2182d8a0f1b0SPaul Gortmaker 	struct netdev_queue *txq;
2183ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2184ec21e2ecSJeff Kirsher 	struct txbd8 *bdp, *next = NULL;
2185ec21e2ecSJeff Kirsher 	struct txbd8 *lbdp = NULL;
2186ec21e2ecSJeff Kirsher 	struct txbd8 *base = tx_queue->tx_bd_base;
2187ec21e2ecSJeff Kirsher 	struct sk_buff *skb;
2188ec21e2ecSJeff Kirsher 	int skb_dirtytx;
2189ec21e2ecSJeff Kirsher 	int tx_ring_size = tx_queue->tx_ring_size;
2190ec21e2ecSJeff Kirsher 	int frags = 0, nr_txbds = 0;
2191ec21e2ecSJeff Kirsher 	int i;
2192ec21e2ecSJeff Kirsher 	int howmany = 0;
2193d8a0f1b0SPaul Gortmaker 	int tqi = tx_queue->qindex;
2194d8a0f1b0SPaul Gortmaker 	unsigned int bytes_sent = 0;
2195ec21e2ecSJeff Kirsher 	u32 lstatus;
2196ec21e2ecSJeff Kirsher 	size_t buflen;
2197ec21e2ecSJeff Kirsher 
2198d8a0f1b0SPaul Gortmaker 	txq = netdev_get_tx_queue(dev, tqi);
2199ec21e2ecSJeff Kirsher 	bdp = tx_queue->dirty_tx;
2200ec21e2ecSJeff Kirsher 	skb_dirtytx = tx_queue->skb_dirtytx;
2201ec21e2ecSJeff Kirsher 
2202ec21e2ecSJeff Kirsher 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2203c26a2c2dSVladimir Oltean 		bool do_tstamp;
2204c26a2c2dSVladimir Oltean 
2205c26a2c2dSVladimir Oltean 		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2206c26a2c2dSVladimir Oltean 			    priv->hwts_tx_en;
2207ec21e2ecSJeff Kirsher 
2208ec21e2ecSJeff Kirsher 		frags = skb_shinfo(skb)->nr_frags;
2209ec21e2ecSJeff Kirsher 
22100977f817SJan Ceuleers 		/* When time stamping, one additional TxBD must be freed.
2211ec21e2ecSJeff Kirsher 		 * Also, we need to dma_unmap_single() the TxPAL.
2212ec21e2ecSJeff Kirsher 		 */
2213c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp))
2214ec21e2ecSJeff Kirsher 			nr_txbds = frags + 2;
2215ec21e2ecSJeff Kirsher 		else
2216ec21e2ecSJeff Kirsher 			nr_txbds = frags + 1;
2217ec21e2ecSJeff Kirsher 
2218ec21e2ecSJeff Kirsher 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2219ec21e2ecSJeff Kirsher 
2220a7312d58SClaudiu Manoil 		lstatus = be32_to_cpu(lbdp->lstatus);
2221ec21e2ecSJeff Kirsher 
2222ec21e2ecSJeff Kirsher 		/* Only clean completed frames */
2223ec21e2ecSJeff Kirsher 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2224ec21e2ecSJeff Kirsher 		    (lstatus & BD_LENGTH_MASK))
2225ec21e2ecSJeff Kirsher 			break;
2226ec21e2ecSJeff Kirsher 
2227c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp)) {
2228ec21e2ecSJeff Kirsher 			next = next_txbd(bdp, base, tx_ring_size);
2229a7312d58SClaudiu Manoil 			buflen = be16_to_cpu(next->length) +
2230a7312d58SClaudiu Manoil 				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2231ec21e2ecSJeff Kirsher 		} else {
2232a7312d58SClaudiu Manoil 			buflen = be16_to_cpu(bdp->length);
2232a7312d58SClaudiu Manoil 		}
2233ec21e2ecSJeff Kirsher 
2234a7312d58SClaudiu Manoil 		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2235ec21e2ecSJeff Kirsher 				 buflen, DMA_TO_DEVICE);
2236ec21e2ecSJeff Kirsher 
2237c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp)) {
2238ec21e2ecSJeff Kirsher 			struct skb_shared_hwtstamps shhwtstamps;
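			/* Fetch the timestamp the controller wrote into the
			 * padding in front of the frame data: the 8-byte
			 * aligned word around offset 0x10 of the buffer.
			 */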
2239b4b67f26SScott Wood 			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2240b4b67f26SScott Wood 					  ~0x7UL);
2241bc4598bcSJan Ceuleers 
2242ec21e2ecSJeff Kirsher 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2243f54af12fSYangbo Lu 			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
22449c4886e5SManfred Rudigier 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2245ec21e2ecSJeff Kirsher 			skb_tstamp_tx(skb, &shhwtstamps);
2246a7312d58SClaudiu Manoil 			gfar_clear_txbd_status(bdp);
2247ec21e2ecSJeff Kirsher 			bdp = next;
2248ec21e2ecSJeff Kirsher 		}
2249ec21e2ecSJeff Kirsher 
2250a7312d58SClaudiu Manoil 		gfar_clear_txbd_status(bdp);
2251ec21e2ecSJeff Kirsher 		bdp = next_txbd(bdp, base, tx_ring_size);
2252ec21e2ecSJeff Kirsher 
2253ec21e2ecSJeff Kirsher 		for (i = 0; i < frags; i++) {
2254a7312d58SClaudiu Manoil 			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2255a7312d58SClaudiu Manoil 				       be16_to_cpu(bdp->length),
2256a7312d58SClaudiu Manoil 				       DMA_TO_DEVICE);
2257a7312d58SClaudiu Manoil 			gfar_clear_txbd_status(bdp);
2258ec21e2ecSJeff Kirsher 			bdp = next_txbd(bdp, base, tx_ring_size);
2259ec21e2ecSJeff Kirsher 		}
2260ec21e2ecSJeff Kirsher 
226150ad076bSClaudiu Manoil 		bytes_sent += GFAR_CB(skb)->bytes_sent;
2262d8a0f1b0SPaul Gortmaker 
2263ec21e2ecSJeff Kirsher 		dev_kfree_skb_any(skb);
2264ec21e2ecSJeff Kirsher 
2265ec21e2ecSJeff Kirsher 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2266ec21e2ecSJeff Kirsher 
2267ec21e2ecSJeff Kirsher 		skb_dirtytx = (skb_dirtytx + 1) &
2268ec21e2ecSJeff Kirsher 			      TX_RING_MOD_MASK(tx_ring_size);
2269ec21e2ecSJeff Kirsher 
2270ec21e2ecSJeff Kirsher 		howmany++;
2271bc602280SClaudiu Manoil 		spin_lock(&tx_queue->txlock);
2272ec21e2ecSJeff Kirsher 		tx_queue->num_txbdfree += nr_txbds;
2273bc602280SClaudiu Manoil 		spin_unlock(&tx_queue->txlock);
2274ec21e2ecSJeff Kirsher 	}
2275ec21e2ecSJeff Kirsher 
2276ec21e2ecSJeff Kirsher 	/* If we freed a buffer, we can restart transmission, if necessary */
22770851133bSClaudiu Manoil 	if (tx_queue->num_txbdfree &&
22780851133bSClaudiu Manoil 	    netif_tx_queue_stopped(txq) &&
22790851133bSClaudiu Manoil 	    !(test_bit(GFAR_DOWN, &priv->state)))
22800851133bSClaudiu Manoil 		netif_wake_subqueue(priv->ndev, tqi);
2281ec21e2ecSJeff Kirsher 
2282ec21e2ecSJeff Kirsher 	/* Update dirty indicators */
2283ec21e2ecSJeff Kirsher 	tx_queue->skb_dirtytx = skb_dirtytx;
2284ec21e2ecSJeff Kirsher 	tx_queue->dirty_tx = bdp;
2285ec21e2ecSJeff Kirsher 
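	/* Report the completed frames/bytes to the stack's byte queue
	 * limits (BQL) accounting
	 */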
2286d8a0f1b0SPaul Gortmaker 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2287ec21e2ecSJeff Kirsher }
2288ec21e2ecSJeff Kirsher 
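/* Translate RxBD error flags into netdev and driver error statistics */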
2289f23223f1SClaudiu Manoil static void count_errors(u32 lstatus, struct net_device *ndev)
2290ec21e2ecSJeff Kirsher {
2291f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
2292f23223f1SClaudiu Manoil 	struct net_device_stats *stats = &ndev->stats;
2293ec21e2ecSJeff Kirsher 	struct gfar_extra_stats *estats = &priv->extra_stats;
2294ec21e2ecSJeff Kirsher 
22950977f817SJan Ceuleers 	/* If the packet was truncated, none of the other errors matter */
2296f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2297ec21e2ecSJeff Kirsher 		stats->rx_length_errors++;
2298ec21e2ecSJeff Kirsher 
2299212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_trunc);
2300ec21e2ecSJeff Kirsher 
2301ec21e2ecSJeff Kirsher 		return;
2302ec21e2ecSJeff Kirsher 	}
2303ec21e2ecSJeff Kirsher 	/* Count the errors, if there were any */
2304f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2305ec21e2ecSJeff Kirsher 		stats->rx_length_errors++;
2306ec21e2ecSJeff Kirsher 
2307f966082eSClaudiu Manoil 		if (lstatus & BD_LFLAG(RXBD_LARGE))
2308212079dfSPaul Gortmaker 			atomic64_inc(&estats->rx_large);
2309ec21e2ecSJeff Kirsher 		else
2310212079dfSPaul Gortmaker 			atomic64_inc(&estats->rx_short);
2311ec21e2ecSJeff Kirsher 	}
2312f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2313ec21e2ecSJeff Kirsher 		stats->rx_frame_errors++;
2314212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_nonoctet);
2315ec21e2ecSJeff Kirsher 	}
2316f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2317212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_crcerr);
2318ec21e2ecSJeff Kirsher 		stats->rx_crc_errors++;
2319ec21e2ecSJeff Kirsher 	}
2320f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2321212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_overrun);
2322f966082eSClaudiu Manoil 		stats->rx_over_errors++;
2323ec21e2ecSJeff Kirsher 	}
2324ec21e2ecSJeff Kirsher }
2325ec21e2ecSJeff Kirsher 
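/* Rx interrupt handler: acknowledge filer general purpose events (FGPI),
 * otherwise mask further Rx interrupts and schedule NAPI Rx processing
 * for the group
 */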
23267ad38784SArseny Solokha static irqreturn_t gfar_receive(int irq, void *grp_id)
2327ec21e2ecSJeff Kirsher {
2328aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2329aeb12c5eSClaudiu Manoil 	unsigned long flags;
23303e905b80SClaudiu Manoil 	u32 imask, ievent;
23313e905b80SClaudiu Manoil 
23323e905b80SClaudiu Manoil 	ievent = gfar_read(&grp->regs->ievent);
23333e905b80SClaudiu Manoil 
23343e905b80SClaudiu Manoil 	if (unlikely(ievent & IEVENT_FGPI)) {
23353e905b80SClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
23363e905b80SClaudiu Manoil 		return IRQ_HANDLED;
23373e905b80SClaudiu Manoil 	}
2338aeb12c5eSClaudiu Manoil 
2339aeb12c5eSClaudiu Manoil 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2340aeb12c5eSClaudiu Manoil 		spin_lock_irqsave(&grp->grplock, flags);
2341aeb12c5eSClaudiu Manoil 		imask = gfar_read(&grp->regs->imask);
2342aeb12c5eSClaudiu Manoil 		imask &= IMASK_RX_DISABLED;
2343aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->imask, imask);
2344aeb12c5eSClaudiu Manoil 		spin_unlock_irqrestore(&grp->grplock, flags);
2345aeb12c5eSClaudiu Manoil 		__napi_schedule(&grp->napi_rx);
2346aeb12c5eSClaudiu Manoil 	} else {
2347aeb12c5eSClaudiu Manoil 		/* Clear IEVENT, so interrupts aren't called again
2348aeb12c5eSClaudiu Manoil 		 * because of the packets that have already arrived.
2349aeb12c5eSClaudiu Manoil 		 */
2350aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2351aeb12c5eSClaudiu Manoil 	}
2352aeb12c5eSClaudiu Manoil 
2353aeb12c5eSClaudiu Manoil 	return IRQ_HANDLED;
2354aeb12c5eSClaudiu Manoil }
2355aeb12c5eSClaudiu Manoil 
2356aeb12c5eSClaudiu Manoil /* Interrupt Handler for Transmit complete */
2357aeb12c5eSClaudiu Manoil static irqreturn_t gfar_transmit(int irq, void *grp_id)
2358aeb12c5eSClaudiu Manoil {
2359aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2360aeb12c5eSClaudiu Manoil 	unsigned long flags;
2361aeb12c5eSClaudiu Manoil 	u32 imask;
2362aeb12c5eSClaudiu Manoil 
2363aeb12c5eSClaudiu Manoil 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2364aeb12c5eSClaudiu Manoil 		spin_lock_irqsave(&grp->grplock, flags);
2365aeb12c5eSClaudiu Manoil 		imask = gfar_read(&grp->regs->imask);
2366aeb12c5eSClaudiu Manoil 		imask &= IMASK_TX_DISABLED;
2367aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->imask, imask);
2368aeb12c5eSClaudiu Manoil 		spin_unlock_irqrestore(&grp->grplock, flags);
2369aeb12c5eSClaudiu Manoil 		__napi_schedule(&grp->napi_tx);
2370aeb12c5eSClaudiu Manoil 	} else {
2371aeb12c5eSClaudiu Manoil 		/* Clear IEVENT, so interrupts aren't called again
2372aeb12c5eSClaudiu Manoil 		 * because of the packets that have already arrived.
2373aeb12c5eSClaudiu Manoil 		 */
2374aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2375aeb12c5eSClaudiu Manoil 	}
2376aeb12c5eSClaudiu Manoil 
2377ec21e2ecSJeff Kirsher 	return IRQ_HANDLED;
2378ec21e2ecSJeff Kirsher }
2379ec21e2ecSJeff Kirsher 
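/* Rx buffers are recycled page halves: each page backs two buffers of
 * GFAR_RXB_TRUESIZE bytes.  gfar_add_rx_frag() attaches the current half
 * to the skb and, if the page is not referenced elsewhere, flips
 * page_offset so the other half can be handed back to the ring.
 */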
238075354148SClaudiu Manoil static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
238175354148SClaudiu Manoil 			     struct sk_buff *skb, bool first)
238275354148SClaudiu Manoil {
2383202a0a70SAndy Spencer 	int size = lstatus & BD_LENGTH_MASK;
238475354148SClaudiu Manoil 	struct page *page = rxb->page;
238575354148SClaudiu Manoil 
23866c389fc9SZefir Kurtisi 	if (likely(first)) {
238775354148SClaudiu Manoil 		skb_put(skb, size);
23886c389fc9SZefir Kurtisi 	} else {
23896c389fc9SZefir Kurtisi 		/* the last fragment's length contains the full frame length */
2390d903ec77SAndy Spencer 		if (lstatus & BD_LFLAG(RXBD_LAST))
23916c389fc9SZefir Kurtisi 			size -= skb->len;
23926c389fc9SZefir Kurtisi 
2393*d8861babSMichael Braun 		WARN(size < 0, "gianfar: rx fragment size underflow");
2394*d8861babSMichael Braun 		if (size < 0)
2395*d8861babSMichael Braun 			return false;
2396*d8861babSMichael Braun 
239775354148SClaudiu Manoil 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
239875354148SClaudiu Manoil 				rxb->page_offset + RXBUF_ALIGNMENT,
239975354148SClaudiu Manoil 				size, GFAR_RXB_TRUESIZE);
24006c389fc9SZefir Kurtisi 	}
240175354148SClaudiu Manoil 
240275354148SClaudiu Manoil 	/* try reuse page */
240369fed99bSEric Dumazet 	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
240475354148SClaudiu Manoil 		return false;
240575354148SClaudiu Manoil 
240675354148SClaudiu Manoil 	/* change offset to the other half */
240775354148SClaudiu Manoil 	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
240875354148SClaudiu Manoil 
2409fe896d18SJoonsoo Kim 	page_ref_inc(page);
241075354148SClaudiu Manoil 
241175354148SClaudiu Manoil 	return true;
241275354148SClaudiu Manoil }
241375354148SClaudiu Manoil 
241475354148SClaudiu Manoil static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
241575354148SClaudiu Manoil 			       struct gfar_rx_buff *old_rxb)
241675354148SClaudiu Manoil {
241775354148SClaudiu Manoil 	struct gfar_rx_buff *new_rxb;
241875354148SClaudiu Manoil 	u16 nta = rxq->next_to_alloc;
241975354148SClaudiu Manoil 
242075354148SClaudiu Manoil 	new_rxb = &rxq->rx_buff[nta];
242175354148SClaudiu Manoil 
242275354148SClaudiu Manoil 	/* find next buf that can reuse a page */
242375354148SClaudiu Manoil 	nta++;
242475354148SClaudiu Manoil 	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
242575354148SClaudiu Manoil 
242675354148SClaudiu Manoil 	/* copy page reference */
242775354148SClaudiu Manoil 	*new_rxb = *old_rxb;
242875354148SClaudiu Manoil 
242975354148SClaudiu Manoil 	/* sync for use by the device */
243075354148SClaudiu Manoil 	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
243175354148SClaudiu Manoil 					 old_rxb->page_offset,
243275354148SClaudiu Manoil 					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
243375354148SClaudiu Manoil }
243475354148SClaudiu Manoil 
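/* Build a new skb (or extend the one passed in, for a multi-buffer frame)
 * from the buffer at next_to_clean, then recycle or unmap its page.
 * Returns NULL if skb allocation fails.
 */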
243575354148SClaudiu Manoil static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
243675354148SClaudiu Manoil 					    u32 lstatus, struct sk_buff *skb)
243775354148SClaudiu Manoil {
243875354148SClaudiu Manoil 	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
243975354148SClaudiu Manoil 	struct page *page = rxb->page;
244075354148SClaudiu Manoil 	bool first = false;
244175354148SClaudiu Manoil 
244275354148SClaudiu Manoil 	if (likely(!skb)) {
244375354148SClaudiu Manoil 		void *buff_addr = page_address(page) + rxb->page_offset;
244475354148SClaudiu Manoil 
244575354148SClaudiu Manoil 		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
244675354148SClaudiu Manoil 		if (unlikely(!skb)) {
244775354148SClaudiu Manoil 			gfar_rx_alloc_err(rx_queue);
244875354148SClaudiu Manoil 			return NULL;
244975354148SClaudiu Manoil 		}
245075354148SClaudiu Manoil 		skb_reserve(skb, RXBUF_ALIGNMENT);
245175354148SClaudiu Manoil 		first = true;
245275354148SClaudiu Manoil 	}
245375354148SClaudiu Manoil 
245475354148SClaudiu Manoil 	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
245575354148SClaudiu Manoil 				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
245675354148SClaudiu Manoil 
245775354148SClaudiu Manoil 	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
245875354148SClaudiu Manoil 		/* reuse the free half of the page */
245975354148SClaudiu Manoil 		gfar_reuse_rx_page(rx_queue, rxb);
246075354148SClaudiu Manoil 	} else {
246175354148SClaudiu Manoil 		/* page cannot be reused, unmap it */
246275354148SClaudiu Manoil 		dma_unmap_page(rx_queue->dev, rxb->dma,
246375354148SClaudiu Manoil 			       PAGE_SIZE, DMA_FROM_DEVICE);
246475354148SClaudiu Manoil 	}
246575354148SClaudiu Manoil 
246675354148SClaudiu Manoil 	/* clear rxb content */
246775354148SClaudiu Manoil 	rxb->page = NULL;
246875354148SClaudiu Manoil 
246975354148SClaudiu Manoil 	return skb;
247075354148SClaudiu Manoil }
247175354148SClaudiu Manoil 
2472ec21e2ecSJeff Kirsher static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2473ec21e2ecSJeff Kirsher {
2474ec21e2ecSJeff Kirsher 	/* If valid headers were found, and valid sums
2475ec21e2ecSJeff Kirsher 	 * were verified, then we tell the kernel that no
24760977f817SJan Ceuleers 	 * checksumming is necessary.  Otherwise, the stack is
24760977f817SJan Ceuleers 	 * left to verify the checksum itself.
24770977f817SJan Ceuleers 	 */
247826eb9374SClaudiu Manoil 	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
247926eb9374SClaudiu Manoil 	    (RXFCB_CIP | RXFCB_CTU))
2480ec21e2ecSJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2481ec21e2ecSJeff Kirsher 	else
2482ec21e2ecSJeff Kirsher 		skb_checksum_none_assert(skb);
2483ec21e2ecSJeff Kirsher }
2484ec21e2ecSJeff Kirsher 
24850977f817SJan Ceuleers /* gfar_process_frame() -- handle one completed frame: strip the FCB
24850977f817SJan Ceuleers  * and padding, pick up timestamp/checksum/VLAN info, trim the FCS.
24850977f817SJan Ceuleers  */
2486f23223f1SClaudiu Manoil static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2487ec21e2ecSJeff Kirsher {
2488f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
2489ec21e2ecSJeff Kirsher 	struct rxfcb *fcb = NULL;
2490ec21e2ecSJeff Kirsher 
2491ec21e2ecSJeff Kirsher 	/* fcb is at the beginning if exists */
2492ec21e2ecSJeff Kirsher 	fcb = (struct rxfcb *)skb->data;
2493ec21e2ecSJeff Kirsher 
24940977f817SJan Ceuleers 	/* Remove the FCB from the skb
24950977f817SJan Ceuleers 	 * Remove the padded bytes, if there are any
24960977f817SJan Ceuleers 	 */
2497f23223f1SClaudiu Manoil 	if (priv->uses_rxfcb)
249876f31e8bSClaudiu Manoil 		skb_pull(skb, GMAC_FCB_LEN);
2499ec21e2ecSJeff Kirsher 
2500ec21e2ecSJeff Kirsher 	/* Get receive timestamp from the skb */
2501ec21e2ecSJeff Kirsher 	if (priv->hwts_rx_en) {
2502ec21e2ecSJeff Kirsher 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2503ec21e2ecSJeff Kirsher 		u64 *ns = (u64 *) skb->data;
2504bc4598bcSJan Ceuleers 
2505ec21e2ecSJeff Kirsher 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2506f54af12fSYangbo Lu 		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2507ec21e2ecSJeff Kirsher 	}
2508ec21e2ecSJeff Kirsher 
2509ec21e2ecSJeff Kirsher 	if (priv->padding)
2510ec21e2ecSJeff Kirsher 		skb_pull(skb, priv->padding);
2511ec21e2ecSJeff Kirsher 
2512d903ec77SAndy Spencer 	/* Trim off the FCS */
2513d903ec77SAndy Spencer 	pskb_trim(skb, skb->len - ETH_FCS_LEN);
2514d903ec77SAndy Spencer 
2515f23223f1SClaudiu Manoil 	if (ndev->features & NETIF_F_RXCSUM)
2516ec21e2ecSJeff Kirsher 		gfar_rx_checksum(skb, fcb);
2517ec21e2ecSJeff Kirsher 
2518f646968fSPatrick McHardy 	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2519823dcd25SDavid S. Miller 	 * Even if vlan rx accel is disabled, on some chips
2520823dcd25SDavid S. Miller 	 * RXFCB_VLN is pseudo randomly set.
2521823dcd25SDavid S. Miller 	 */
2522f23223f1SClaudiu Manoil 	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
252326eb9374SClaudiu Manoil 	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
252426eb9374SClaudiu Manoil 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
252526eb9374SClaudiu Manoil 				       be16_to_cpu(fcb->vlctl));
2526ec21e2ecSJeff Kirsher }
2527ec21e2ecSJeff Kirsher 
2528ec21e2ecSJeff Kirsher /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2529ec21e2ecSJeff Kirsher  * until the budget/quota has been reached. Returns the number of
2530ec21e2ecSJeff Kirsher  * frames handled. Partial frames are carried over in rx_queue->skb.
2531ec21e2ecSJeff Kirsher  */
25327ad38784SArseny Solokha static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
25337ad38784SArseny Solokha 			      int rx_work_limit)
2534ec21e2ecSJeff Kirsher {
2535f23223f1SClaudiu Manoil 	struct net_device *ndev = rx_queue->ndev;
2536f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
253775354148SClaudiu Manoil 	struct rxbd8 *bdp;
253875354148SClaudiu Manoil 	int i, howmany = 0;
253975354148SClaudiu Manoil 	struct sk_buff *skb = rx_queue->skb;
254075354148SClaudiu Manoil 	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
254175354148SClaudiu Manoil 	unsigned int total_bytes = 0, total_pkts = 0;
2542ec21e2ecSJeff Kirsher 
2543ec21e2ecSJeff Kirsher 	/* Get the first full descriptor */
254476f31e8bSClaudiu Manoil 	i = rx_queue->next_to_clean;
2545ec21e2ecSJeff Kirsher 
254676f31e8bSClaudiu Manoil 	while (rx_work_limit--) {
2547f966082eSClaudiu Manoil 		u32 lstatus;
2548ec21e2ecSJeff Kirsher 
254976f31e8bSClaudiu Manoil 		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
255076f31e8bSClaudiu Manoil 			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
255176f31e8bSClaudiu Manoil 			cleaned_cnt = 0;
255276f31e8bSClaudiu Manoil 		}
2553bc4598bcSJan Ceuleers 
255476f31e8bSClaudiu Manoil 		bdp = &rx_queue->rx_bd_base[i];
2555f966082eSClaudiu Manoil 		lstatus = be32_to_cpu(bdp->lstatus);
2556f966082eSClaudiu Manoil 		if (lstatus & BD_LFLAG(RXBD_EMPTY))
255776f31e8bSClaudiu Manoil 			break;
255876f31e8bSClaudiu Manoil 
2559*d8861babSMichael Braun 		/* lost RXBD_LAST descriptor due to overrun */
2560*d8861babSMichael Braun 		if (skb &&
2561*d8861babSMichael Braun 		    (lstatus & BD_LFLAG(RXBD_FIRST))) {
2562*d8861babSMichael Braun 			/* discard faulty buffer */
2563*d8861babSMichael Braun 			dev_kfree_skb(skb);
2564*d8861babSMichael Braun 			skb = NULL;
2565*d8861babSMichael Braun 			rx_queue->stats.rx_dropped++;
2566*d8861babSMichael Braun 
2567*d8861babSMichael Braun 			/* can continue normally */
2568*d8861babSMichael Braun 		}
2569*d8861babSMichael Braun 
257076f31e8bSClaudiu Manoil 		/* order rx buffer descriptor reads */
2571ec21e2ecSJeff Kirsher 		rmb();
2572ec21e2ecSJeff Kirsher 
257376f31e8bSClaudiu Manoil 		/* fetch next to clean buffer from the ring */
257475354148SClaudiu Manoil 		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
257575354148SClaudiu Manoil 		if (unlikely(!skb))
257675354148SClaudiu Manoil 			break;
2577ec21e2ecSJeff Kirsher 
257875354148SClaudiu Manoil 		cleaned_cnt++;
257975354148SClaudiu Manoil 		howmany++;
2580ec21e2ecSJeff Kirsher 
258175354148SClaudiu Manoil 		if (unlikely(++i == rx_queue->rx_ring_size))
258275354148SClaudiu Manoil 			i = 0;
2583ec21e2ecSJeff Kirsher 
258475354148SClaudiu Manoil 		rx_queue->next_to_clean = i;
258575354148SClaudiu Manoil 
258675354148SClaudiu Manoil 		/* fetch next buffer if not the last in frame */
258775354148SClaudiu Manoil 		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
258875354148SClaudiu Manoil 			continue;
258975354148SClaudiu Manoil 
259075354148SClaudiu Manoil 		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2591f23223f1SClaudiu Manoil 			count_errors(lstatus, ndev);
2592ec21e2ecSJeff Kirsher 
259376f31e8bSClaudiu Manoil 			/* discard faulty buffer */
2594acb600deSEric Dumazet 			dev_kfree_skb(skb);
259575354148SClaudiu Manoil 			skb = NULL;
259675354148SClaudiu Manoil 			rx_queue->stats.rx_dropped++;
259775354148SClaudiu Manoil 			continue;
259875354148SClaudiu Manoil 		}
259976f31e8bSClaudiu Manoil 
2600590399ddSClaudiu Manoil 		gfar_process_frame(ndev, skb);
2601590399ddSClaudiu Manoil 
2602ec21e2ecSJeff Kirsher 		/* Increment the number of packets */
260375354148SClaudiu Manoil 		total_pkts++;
260475354148SClaudiu Manoil 		total_bytes += skb->len;
2605ec21e2ecSJeff Kirsher 
2606ec21e2ecSJeff Kirsher 		skb_record_rx_queue(skb, rx_queue->qindex);
260775354148SClaudiu Manoil 
2608590399ddSClaudiu Manoil 		skb->protocol = eth_type_trans(skb, ndev);
2609f23223f1SClaudiu Manoil 
2610f23223f1SClaudiu Manoil 		/* Send the packet up the stack */
2611f23223f1SClaudiu Manoil 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2612ec21e2ecSJeff Kirsher 
261375354148SClaudiu Manoil 		skb = NULL;
2614ec21e2ecSJeff Kirsher 	}
2615ec21e2ecSJeff Kirsher 
261675354148SClaudiu Manoil 	/* Store incomplete frames for completion */
261775354148SClaudiu Manoil 	rx_queue->skb = skb;
2618ec21e2ecSJeff Kirsher 
261975354148SClaudiu Manoil 	rx_queue->stats.rx_packets += total_pkts;
262075354148SClaudiu Manoil 	rx_queue->stats.rx_bytes += total_bytes;
262176f31e8bSClaudiu Manoil 
262276f31e8bSClaudiu Manoil 	if (cleaned_cnt)
262376f31e8bSClaudiu Manoil 		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
262476f31e8bSClaudiu Manoil 
262576f31e8bSClaudiu Manoil 	/* Update the Last Free RxBD pointer, used by the controller
262576f31e8bSClaudiu Manoil 	 * for Lossless Flow Control (LFC)
262576f31e8bSClaudiu Manoil 	 */
262676f31e8bSClaudiu Manoil 	if (unlikely(priv->tx_actual_en)) {
2627b4b67f26SScott Wood 		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2628b4b67f26SScott Wood 
2629b4b67f26SScott Wood 		gfar_write(rx_queue->rfbptr, bdp_dma);
263076f31e8bSClaudiu Manoil 	}
2631ec21e2ecSJeff Kirsher 
2632ec21e2ecSJeff Kirsher 	return howmany;
2633ec21e2ecSJeff Kirsher }
2634ec21e2ecSJeff Kirsher 
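/* NAPI poll callbacks.  The *_sq ("single queue") variants below serve
 * interrupt groups that own exactly one Rx/Tx queue each, and skip the
 * RSTAT bitmap scan done by the multi-queue gfar_poll_rx()/gfar_poll_tx().
 */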
2635aeb12c5eSClaudiu Manoil static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
26365eaedf31SClaudiu Manoil {
26375eaedf31SClaudiu Manoil 	struct gfar_priv_grp *gfargrp =
2638aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_rx);
26395eaedf31SClaudiu Manoil 	struct gfar __iomem *regs = gfargrp->regs;
264071ff9e3dSClaudiu Manoil 	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
26415eaedf31SClaudiu Manoil 	int work_done = 0;
26425eaedf31SClaudiu Manoil 
26435eaedf31SClaudiu Manoil 	/* Clear IEVENT, so interrupts aren't called again
26445eaedf31SClaudiu Manoil 	 * because of the packets that have already arrived
26455eaedf31SClaudiu Manoil 	 */
2646aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
26475eaedf31SClaudiu Manoil 
26485eaedf31SClaudiu Manoil 	work_done = gfar_clean_rx_ring(rx_queue, budget);
26495eaedf31SClaudiu Manoil 
26505eaedf31SClaudiu Manoil 	if (work_done < budget) {
2651aeb12c5eSClaudiu Manoil 		u32 imask;
26526ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
26535eaedf31SClaudiu Manoil 		/* Clear the halt bit in RSTAT */
26545eaedf31SClaudiu Manoil 		gfar_write(&regs->rstat, gfargrp->rstat);
26555eaedf31SClaudiu Manoil 
2656aeb12c5eSClaudiu Manoil 		spin_lock_irq(&gfargrp->grplock);
2657aeb12c5eSClaudiu Manoil 		imask = gfar_read(&regs->imask);
2658aeb12c5eSClaudiu Manoil 		imask |= IMASK_RX_DEFAULT;
2659aeb12c5eSClaudiu Manoil 		gfar_write(&regs->imask, imask);
2660aeb12c5eSClaudiu Manoil 		spin_unlock_irq(&gfargrp->grplock);
26615eaedf31SClaudiu Manoil 	}
26625eaedf31SClaudiu Manoil 
26635eaedf31SClaudiu Manoil 	return work_done;
26645eaedf31SClaudiu Manoil }
26655eaedf31SClaudiu Manoil 
2666aeb12c5eSClaudiu Manoil static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2667ec21e2ecSJeff Kirsher {
2668bc4598bcSJan Ceuleers 	struct gfar_priv_grp *gfargrp =
2669aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_tx);
2670aeb12c5eSClaudiu Manoil 	struct gfar __iomem *regs = gfargrp->regs;
267171ff9e3dSClaudiu Manoil 	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2672aeb12c5eSClaudiu Manoil 	u32 imask;
2673aeb12c5eSClaudiu Manoil 
2674aeb12c5eSClaudiu Manoil 	/* Clear IEVENT, so interrupts aren't called again
2675aeb12c5eSClaudiu Manoil 	 * because of the packets that have already arrived
2676aeb12c5eSClaudiu Manoil 	 */
2677aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2678aeb12c5eSClaudiu Manoil 
2679aeb12c5eSClaudiu Manoil 	/* run Tx cleanup to completion */
2680aeb12c5eSClaudiu Manoil 	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2681aeb12c5eSClaudiu Manoil 		gfar_clean_tx_ring(tx_queue);
2682aeb12c5eSClaudiu Manoil 
2683aeb12c5eSClaudiu Manoil 	napi_complete(napi);
2684aeb12c5eSClaudiu Manoil 
2685aeb12c5eSClaudiu Manoil 	spin_lock_irq(&gfargrp->grplock);
2686aeb12c5eSClaudiu Manoil 	imask = gfar_read(&regs->imask);
2687aeb12c5eSClaudiu Manoil 	imask |= IMASK_TX_DEFAULT;
2688aeb12c5eSClaudiu Manoil 	gfar_write(&regs->imask, imask);
2689aeb12c5eSClaudiu Manoil 	spin_unlock_irq(&gfargrp->grplock);
2690aeb12c5eSClaudiu Manoil 
2691aeb12c5eSClaudiu Manoil 	return 0;
2692aeb12c5eSClaudiu Manoil }
2693aeb12c5eSClaudiu Manoil 
2694aeb12c5eSClaudiu Manoil static int gfar_poll_rx(struct napi_struct *napi, int budget)
2695aeb12c5eSClaudiu Manoil {
2696aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *gfargrp =
2697aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_rx);
2698ec21e2ecSJeff Kirsher 	struct gfar_private *priv = gfargrp->priv;
2699ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = gfargrp->regs;
2700ec21e2ecSJeff Kirsher 	struct gfar_priv_rx_q *rx_queue = NULL;
2701c233cf40SClaudiu Manoil 	int work_done = 0, work_done_per_q = 0;
270239c0a0d5SClaudiu Manoil 	int i, budget_per_q = 0;
27036be5ed3fSClaudiu Manoil 	unsigned long rstat_rxf;
27046be5ed3fSClaudiu Manoil 	int num_act_queues;
2705ec21e2ecSJeff Kirsher 
2706ec21e2ecSJeff Kirsher 	/* Clear IEVENT, so interrupts aren't called again
27070977f817SJan Ceuleers 	 * because of the packets that have already arrived
27080977f817SJan Ceuleers 	 */
2709aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2710ec21e2ecSJeff Kirsher 
27116be5ed3fSClaudiu Manoil 	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
27126be5ed3fSClaudiu Manoil 
27136be5ed3fSClaudiu Manoil 	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
27146be5ed3fSClaudiu Manoil 	if (num_act_queues)
27156be5ed3fSClaudiu Manoil 		budget_per_q = budget / num_act_queues;
27166be5ed3fSClaudiu Manoil 
2717ec21e2ecSJeff Kirsher 	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
27186be5ed3fSClaudiu Manoil 		/* skip queue if not active */
27196be5ed3fSClaudiu Manoil 		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2720ec21e2ecSJeff Kirsher 			continue;
2721ec21e2ecSJeff Kirsher 
2722c233cf40SClaudiu Manoil 		rx_queue = priv->rx_queue[i];
2723c233cf40SClaudiu Manoil 		work_done_per_q =
2724c233cf40SClaudiu Manoil 			gfar_clean_rx_ring(rx_queue, budget_per_q);
2725c233cf40SClaudiu Manoil 		work_done += work_done_per_q;
2726c233cf40SClaudiu Manoil 
2727c233cf40SClaudiu Manoil 		/* finished processing this queue */
2728c233cf40SClaudiu Manoil 		if (work_done_per_q < budget_per_q) {
27296be5ed3fSClaudiu Manoil 			/* clear active queue hw indication */
27306be5ed3fSClaudiu Manoil 			gfar_write(&regs->rstat,
27316be5ed3fSClaudiu Manoil 				   RSTAT_CLEAR_RXF0 >> i);
27326be5ed3fSClaudiu Manoil 			num_act_queues--;
27336be5ed3fSClaudiu Manoil 
27346be5ed3fSClaudiu Manoil 			if (!num_act_queues)
2735c233cf40SClaudiu Manoil 				break;
2736ec21e2ecSJeff Kirsher 		}
2737ec21e2ecSJeff Kirsher 	}
2738ec21e2ecSJeff Kirsher 
2739aeb12c5eSClaudiu Manoil 	if (!num_act_queues) {
2740aeb12c5eSClaudiu Manoil 		u32 imask;
27416ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
2742ec21e2ecSJeff Kirsher 
2743ec21e2ecSJeff Kirsher 		/* Clear the halt bit in RSTAT */
2744ec21e2ecSJeff Kirsher 		gfar_write(&regs->rstat, gfargrp->rstat);
2745ec21e2ecSJeff Kirsher 
2746aeb12c5eSClaudiu Manoil 		spin_lock_irq(&gfargrp->grplock);
2747aeb12c5eSClaudiu Manoil 		imask = gfar_read(&regs->imask);
2748aeb12c5eSClaudiu Manoil 		imask |= IMASK_RX_DEFAULT;
2749aeb12c5eSClaudiu Manoil 		gfar_write(&regs->imask, imask);
2750aeb12c5eSClaudiu Manoil 		spin_unlock_irq(&gfargrp->grplock);
2751ec21e2ecSJeff Kirsher 	}
2752ec21e2ecSJeff Kirsher 
2753c233cf40SClaudiu Manoil 	return work_done;
2754ec21e2ecSJeff Kirsher }
2755ec21e2ecSJeff Kirsher 
2756aeb12c5eSClaudiu Manoil static int gfar_poll_tx(struct napi_struct *napi, int budget)
2757aeb12c5eSClaudiu Manoil {
2758aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *gfargrp =
2759aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_tx);
2760aeb12c5eSClaudiu Manoil 	struct gfar_private *priv = gfargrp->priv;
2761aeb12c5eSClaudiu Manoil 	struct gfar __iomem *regs = gfargrp->regs;
2762aeb12c5eSClaudiu Manoil 	struct gfar_priv_tx_q *tx_queue = NULL;
2763aeb12c5eSClaudiu Manoil 	int has_tx_work = 0;
2764aeb12c5eSClaudiu Manoil 	int i;
2765aeb12c5eSClaudiu Manoil 
2766aeb12c5eSClaudiu Manoil 	/* Clear IEVENT, so interrupts aren't called again
2767aeb12c5eSClaudiu Manoil 	 * because of the packets that have already arrived
2768aeb12c5eSClaudiu Manoil 	 */
2769aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2770aeb12c5eSClaudiu Manoil 
2771aeb12c5eSClaudiu Manoil 	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2772aeb12c5eSClaudiu Manoil 		tx_queue = priv->tx_queue[i];
2773aeb12c5eSClaudiu Manoil 		/* run Tx cleanup to completion */
2774aeb12c5eSClaudiu Manoil 		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2775aeb12c5eSClaudiu Manoil 			gfar_clean_tx_ring(tx_queue);
2776aeb12c5eSClaudiu Manoil 			has_tx_work = 1;
2777aeb12c5eSClaudiu Manoil 		}
2778aeb12c5eSClaudiu Manoil 	}
2779aeb12c5eSClaudiu Manoil 
2780aeb12c5eSClaudiu Manoil 	if (!has_tx_work) {
2781aeb12c5eSClaudiu Manoil 		u32 imask;
2782aeb12c5eSClaudiu Manoil 		napi_complete(napi);
2783aeb12c5eSClaudiu Manoil 
2784aeb12c5eSClaudiu Manoil 		spin_lock_irq(&gfargrp->grplock);
2785aeb12c5eSClaudiu Manoil 		imask = gfar_read(&regs->imask);
2786aeb12c5eSClaudiu Manoil 		imask |= IMASK_TX_DEFAULT;
2787aeb12c5eSClaudiu Manoil 		gfar_write(&regs->imask, imask);
2788aeb12c5eSClaudiu Manoil 		spin_unlock_irq(&gfargrp->grplock);
2789aeb12c5eSClaudiu Manoil 	}
2790aeb12c5eSClaudiu Manoil 
2791aeb12c5eSClaudiu Manoil 	return 0;
2792aeb12c5eSClaudiu Manoil }
2793aeb12c5eSClaudiu Manoil 
27947d993c5fSArseny Solokha /* GFAR error interrupt handler */
27957d993c5fSArseny Solokha static irqreturn_t gfar_error(int irq, void *grp_id)
27967d993c5fSArseny Solokha {
27977d993c5fSArseny Solokha 	struct gfar_priv_grp *gfargrp = grp_id;
27987d993c5fSArseny Solokha 	struct gfar __iomem *regs = gfargrp->regs;
27997d993c5fSArseny Solokha 	struct gfar_private *priv = gfargrp->priv;
28007d993c5fSArseny Solokha 	struct net_device *dev = priv->ndev;
28017d993c5fSArseny Solokha 
28027d993c5fSArseny Solokha 	/* Save ievent for future reference */
28037d993c5fSArseny Solokha 	u32 events = gfar_read(&regs->ievent);
28047d993c5fSArseny Solokha 
28057d993c5fSArseny Solokha 	/* Clear IEVENT */
28067d993c5fSArseny Solokha 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
28077d993c5fSArseny Solokha 
28087d993c5fSArseny Solokha 	/* Magic Packet is not an error. */
28097d993c5fSArseny Solokha 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
28107d993c5fSArseny Solokha 	    (events & IEVENT_MAG))
28117d993c5fSArseny Solokha 		events &= ~IEVENT_MAG;
28127d993c5fSArseny Solokha 
28137d993c5fSArseny Solokha 	/* Log the error details when Rx/Tx error messaging is enabled */
28147d993c5fSArseny Solokha 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
28157d993c5fSArseny Solokha 		netdev_dbg(dev,
28167d993c5fSArseny Solokha 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
28177d993c5fSArseny Solokha 			   events, gfar_read(&regs->imask));
28187d993c5fSArseny Solokha 
28197d993c5fSArseny Solokha 	/* Update the error counters */
28207d993c5fSArseny Solokha 	if (events & IEVENT_TXE) {
28217d993c5fSArseny Solokha 		dev->stats.tx_errors++;
28227d993c5fSArseny Solokha 
28237d993c5fSArseny Solokha 		if (events & IEVENT_LC)
28247d993c5fSArseny Solokha 			dev->stats.tx_window_errors++;
28257d993c5fSArseny Solokha 		if (events & IEVENT_CRL)
28267d993c5fSArseny Solokha 			dev->stats.tx_aborted_errors++;
28277d993c5fSArseny Solokha 		if (events & IEVENT_XFUN) {
28287d993c5fSArseny Solokha 			netif_dbg(priv, tx_err, dev,
28297d993c5fSArseny Solokha 				  "TX FIFO underrun, packet dropped\n");
28307d993c5fSArseny Solokha 			dev->stats.tx_dropped++;
28317d993c5fSArseny Solokha 			atomic64_inc(&priv->extra_stats.tx_underrun);
28327d993c5fSArseny Solokha 
28337d993c5fSArseny Solokha 			schedule_work(&priv->reset_task);
28347d993c5fSArseny Solokha 		}
28357d993c5fSArseny Solokha 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
28367d993c5fSArseny Solokha 	}
28377d993c5fSArseny Solokha 	if (events & IEVENT_BSY) {
28387d993c5fSArseny Solokha 		dev->stats.rx_over_errors++;
28397d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.rx_bsy);
28407d993c5fSArseny Solokha 
28417d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
28427d993c5fSArseny Solokha 			  gfar_read(&regs->rstat));
28437d993c5fSArseny Solokha 	}
28447d993c5fSArseny Solokha 	if (events & IEVENT_BABR) {
28457d993c5fSArseny Solokha 		dev->stats.rx_errors++;
28467d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.rx_babr);
28477d993c5fSArseny Solokha 
28487d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
28497d993c5fSArseny Solokha 	}
28507d993c5fSArseny Solokha 	if (events & IEVENT_EBERR) {
28517d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.eberr);
28527d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "bus error\n");
28537d993c5fSArseny Solokha 	}
28547d993c5fSArseny Solokha 	if (events & IEVENT_RXC)
28557d993c5fSArseny Solokha 		netif_dbg(priv, rx_status, dev, "control frame\n");
28567d993c5fSArseny Solokha 
28577d993c5fSArseny Solokha 	if (events & IEVENT_BABT) {
28587d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.tx_babt);
28597d993c5fSArseny Solokha 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
28607d993c5fSArseny Solokha 	}
28617d993c5fSArseny Solokha 	return IRQ_HANDLED;
28627d993c5fSArseny Solokha }
28637d993c5fSArseny Solokha 
28647d993c5fSArseny Solokha /* The interrupt handler for devices with one interrupt */
28657d993c5fSArseny Solokha static irqreturn_t gfar_interrupt(int irq, void *grp_id)
28667d993c5fSArseny Solokha {
28677d993c5fSArseny Solokha 	struct gfar_priv_grp *gfargrp = grp_id;
28687d993c5fSArseny Solokha 
28697d993c5fSArseny Solokha 	/* Save ievent for future reference */
28707d993c5fSArseny Solokha 	u32 events = gfar_read(&gfargrp->regs->ievent);
28717d993c5fSArseny Solokha 
28727d993c5fSArseny Solokha 	/* Check for reception */
28737d993c5fSArseny Solokha 	if (events & IEVENT_RX_MASK)
28747d993c5fSArseny Solokha 		gfar_receive(irq, grp_id);
28757d993c5fSArseny Solokha 
28767d993c5fSArseny Solokha 	/* Check for transmit completion */
28777d993c5fSArseny Solokha 	if (events & IEVENT_TX_MASK)
28787d993c5fSArseny Solokha 		gfar_transmit(irq, grp_id);
28797d993c5fSArseny Solokha 
28807d993c5fSArseny Solokha 	/* Check for errors */
28817d993c5fSArseny Solokha 	if (events & IEVENT_ERR_MASK)
28827d993c5fSArseny Solokha 		gfar_error(irq, grp_id);
28837d993c5fSArseny Solokha 
28847d993c5fSArseny Solokha 	return IRQ_HANDLED;
28857d993c5fSArseny Solokha }
2886aeb12c5eSClaudiu Manoil 
2887ec21e2ecSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
28880977f817SJan Ceuleers /* Polling 'interrupt' - used by things like netconsole to send skbs
2889ec21e2ecSJeff Kirsher  * without having to re-enable interrupts. It's not called while
2890ec21e2ecSJeff Kirsher  * the interrupt routine is executing.
2891ec21e2ecSJeff Kirsher  */
2892ec21e2ecSJeff Kirsher static void gfar_netpoll(struct net_device *dev)
2893ec21e2ecSJeff Kirsher {
2894ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
28953a2e16c8SJan Ceuleers 	int i;
2896ec21e2ecSJeff Kirsher 
2897ec21e2ecSJeff Kirsher 	/* If the device has multiple interrupts, run tx/rx */
2898ec21e2ecSJeff Kirsher 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2899ec21e2ecSJeff Kirsher 		for (i = 0; i < priv->num_grps; i++) {
290062ed839dSPaul Gortmaker 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
290162ed839dSPaul Gortmaker 
290262ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, TX)->irq);
290362ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, RX)->irq);
290462ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, ER)->irq);
290562ed839dSPaul Gortmaker 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
290662ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, ER)->irq);
290762ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, RX)->irq);
290862ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, TX)->irq);
2909ec21e2ecSJeff Kirsher 		}
2910ec21e2ecSJeff Kirsher 	} else {
2911ec21e2ecSJeff Kirsher 		for (i = 0; i < priv->num_grps; i++) {
291262ed839dSPaul Gortmaker 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
291362ed839dSPaul Gortmaker 
291462ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, TX)->irq);
291562ed839dSPaul Gortmaker 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
291662ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, TX)->irq);
2917ec21e2ecSJeff Kirsher 		}
2918ec21e2ecSJeff Kirsher 	}
2919ec21e2ecSJeff Kirsher }
2920ec21e2ecSJeff Kirsher #endif
2921ec21e2ecSJeff Kirsher 
29227d993c5fSArseny Solokha static void free_grp_irqs(struct gfar_priv_grp *grp)
2923ec21e2ecSJeff Kirsher {
29247d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, TX)->irq, grp);
29257d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, RX)->irq, grp);
29267d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, ER)->irq, grp);
2927ec21e2ecSJeff Kirsher }
2928ec21e2ecSJeff Kirsher 
29297d993c5fSArseny Solokha static int register_grp_irqs(struct gfar_priv_grp *grp)
29307d993c5fSArseny Solokha {
29317d993c5fSArseny Solokha 	struct gfar_private *priv = grp->priv;
29327d993c5fSArseny Solokha 	struct net_device *dev = priv->ndev;
29337d993c5fSArseny Solokha 	int err;
29347d993c5fSArseny Solokha 
29357d993c5fSArseny Solokha 	/* If the device has multiple interrupts, register for
29367d993c5fSArseny Solokha 	 * them.  Otherwise, only register for the one
2937ec21e2ecSJeff Kirsher 	 */
29387d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
29397d993c5fSArseny Solokha 		/* Install our interrupt handlers for Error,
29407d993c5fSArseny Solokha 		 * Transmit, and Receive
29417d993c5fSArseny Solokha 		 */
29427d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
29437d993c5fSArseny Solokha 				  gfar_irq(grp, ER)->name, grp);
29447d993c5fSArseny Solokha 		if (err < 0) {
29457d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
29467d993c5fSArseny Solokha 				  gfar_irq(grp, ER)->irq);
29477d993c5fSArseny Solokha 
29487d993c5fSArseny Solokha 			goto err_irq_fail;
29497d993c5fSArseny Solokha 		}
29507d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, ER)->irq);
29517d993c5fSArseny Solokha 
29527d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
29537d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->name, grp);
29547d993c5fSArseny Solokha 		if (err < 0) {
29557d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
29567d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->irq);
29577d993c5fSArseny Solokha 			goto tx_irq_fail;
29587d993c5fSArseny Solokha 		}
29597d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
29607d993c5fSArseny Solokha 				  gfar_irq(grp, RX)->name, grp);
29617d993c5fSArseny Solokha 		if (err < 0) {
29627d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
29637d993c5fSArseny Solokha 				  gfar_irq(grp, RX)->irq);
29647d993c5fSArseny Solokha 			goto rx_irq_fail;
29657d993c5fSArseny Solokha 		}
29667d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, RX)->irq);
29677d993c5fSArseny Solokha 
29687d993c5fSArseny Solokha 	} else {
29697d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
29707d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->name, grp);
29717d993c5fSArseny Solokha 		if (err < 0) {
29727d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
29737d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->irq);
29747d993c5fSArseny Solokha 			goto err_irq_fail;
29757d993c5fSArseny Solokha 		}
29767d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, TX)->irq);
29777d993c5fSArseny Solokha 	}
29787d993c5fSArseny Solokha 
29797d993c5fSArseny Solokha 	return 0;
29807d993c5fSArseny Solokha 
29817d993c5fSArseny Solokha rx_irq_fail:
29827d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, TX)->irq, grp);
29837d993c5fSArseny Solokha tx_irq_fail:
29847d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, ER)->irq, grp);
29857d993c5fSArseny Solokha err_irq_fail:
29867d993c5fSArseny Solokha 	return err;
29877d993c5fSArseny Solokha 
29887d993c5fSArseny Solokha }
29897d993c5fSArseny Solokha 
29907d993c5fSArseny Solokha static void gfar_free_irq(struct gfar_private *priv)
29917d993c5fSArseny Solokha {
29927d993c5fSArseny Solokha 	int i;
29937d993c5fSArseny Solokha 
29947d993c5fSArseny Solokha 	/* Free the IRQs */
29957d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
29967d993c5fSArseny Solokha 		for (i = 0; i < priv->num_grps; i++)
29977d993c5fSArseny Solokha 			free_grp_irqs(&priv->gfargrp[i]);
29987d993c5fSArseny Solokha 	} else {
29997d993c5fSArseny Solokha 		for (i = 0; i < priv->num_grps; i++)
30007d993c5fSArseny Solokha 			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
30017d993c5fSArseny Solokha 				 &priv->gfargrp[i]);
30027d993c5fSArseny Solokha 	}
30037d993c5fSArseny Solokha }
30047d993c5fSArseny Solokha 
30057d993c5fSArseny Solokha static int gfar_request_irq(struct gfar_private *priv)
30067d993c5fSArseny Solokha {
30077d993c5fSArseny Solokha 	int err, i, j;
30087d993c5fSArseny Solokha 
30097d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
30107d993c5fSArseny Solokha 		err = register_grp_irqs(&priv->gfargrp[i]);
30117d993c5fSArseny Solokha 		if (err) {
30127d993c5fSArseny Solokha 			for (j = 0; j < i; j++)
30137d993c5fSArseny Solokha 				free_grp_irqs(&priv->gfargrp[j]);
30147d993c5fSArseny Solokha 			return err;
30157d993c5fSArseny Solokha 		}
30167d993c5fSArseny Solokha 	}
30177d993c5fSArseny Solokha 
30187d993c5fSArseny Solokha 	return 0;
30197d993c5fSArseny Solokha }
30207d993c5fSArseny Solokha 
30217d993c5fSArseny Solokha /* Called when something needs to use the ethernet device
30227d993c5fSArseny Solokha  * Returns 0 for success.
30237d993c5fSArseny Solokha  */
30247d993c5fSArseny Solokha static int gfar_enet_open(struct net_device *dev)
3025ec21e2ecSJeff Kirsher {
3026ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
30277d993c5fSArseny Solokha 	int err;
3028ec21e2ecSJeff Kirsher 
30297d993c5fSArseny Solokha 	err = init_phy(dev);
30307d993c5fSArseny Solokha 	if (err)
30317d993c5fSArseny Solokha 		return err;
30327d993c5fSArseny Solokha 
30337d993c5fSArseny Solokha 	err = gfar_request_irq(priv);
30347d993c5fSArseny Solokha 	if (err)
30357d993c5fSArseny Solokha 		return err;
30367d993c5fSArseny Solokha 
30377d993c5fSArseny Solokha 	err = startup_gfar(dev);
30387d993c5fSArseny Solokha 	if (err)
30397d993c5fSArseny Solokha 		return err;
30407d993c5fSArseny Solokha 
30417d993c5fSArseny Solokha 	return 0;
30427d993c5fSArseny Solokha }
30437d993c5fSArseny Solokha 
30447d993c5fSArseny Solokha /* Stops the kernel queue, and halts the controller */
30457d993c5fSArseny Solokha static int gfar_close(struct net_device *dev)
30467d993c5fSArseny Solokha {
30477d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
30487d993c5fSArseny Solokha 
30497d993c5fSArseny Solokha 	cancel_work_sync(&priv->reset_task);
30507d993c5fSArseny Solokha 	stop_gfar(dev);
30517d993c5fSArseny Solokha 
30527d993c5fSArseny Solokha 	/* Disconnect from the PHY */
30537d993c5fSArseny Solokha 	phy_disconnect(dev->phydev);
30547d993c5fSArseny Solokha 
30557d993c5fSArseny Solokha 	gfar_free_irq(priv);
30567d993c5fSArseny Solokha 
30577d993c5fSArseny Solokha 	return 0;
30587d993c5fSArseny Solokha }
30597d993c5fSArseny Solokha 
30607d993c5fSArseny Solokha /* Clears each of the exact match registers to zero, so they
30617d993c5fSArseny Solokha  * don't interfere with normal reception
30627d993c5fSArseny Solokha  */
30637d993c5fSArseny Solokha static void gfar_clear_exact_match(struct net_device *dev)
30647d993c5fSArseny Solokha {
30657d993c5fSArseny Solokha 	int idx;
30667d993c5fSArseny Solokha 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
30677d993c5fSArseny Solokha 
30687d993c5fSArseny Solokha 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
30697d993c5fSArseny Solokha 		gfar_set_mac_for_addr(dev, idx, zero_arr);
3070ec21e2ecSJeff Kirsher }
3071ec21e2ecSJeff Kirsher 
3072ec21e2ecSJeff Kirsher /* Update the hash table based on the current list of multicast
3073ec21e2ecSJeff Kirsher  * addresses we subscribe to.  Also, change the promiscuity of
3074ec21e2ecSJeff Kirsher  * the device based on the flags (this function is called
30750977f817SJan Ceuleers  * whenever dev->flags is changed)
30760977f817SJan Ceuleers  */
3077ec21e2ecSJeff Kirsher static void gfar_set_multi(struct net_device *dev)
3078ec21e2ecSJeff Kirsher {
3079ec21e2ecSJeff Kirsher 	struct netdev_hw_addr *ha;
3080ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
3081ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3082ec21e2ecSJeff Kirsher 	u32 tempval;
3083ec21e2ecSJeff Kirsher 
3084ec21e2ecSJeff Kirsher 	if (dev->flags & IFF_PROMISC) {
3085ec21e2ecSJeff Kirsher 		/* Set RCTRL to PROM */
3086ec21e2ecSJeff Kirsher 		tempval = gfar_read(&regs->rctrl);
3087ec21e2ecSJeff Kirsher 		tempval |= RCTRL_PROM;
3088ec21e2ecSJeff Kirsher 		gfar_write(&regs->rctrl, tempval);
3089ec21e2ecSJeff Kirsher 	} else {
3090ec21e2ecSJeff Kirsher 		/* Set RCTRL to not PROM */
3091ec21e2ecSJeff Kirsher 		tempval = gfar_read(&regs->rctrl);
3092ec21e2ecSJeff Kirsher 		tempval &= ~(RCTRL_PROM);
3093ec21e2ecSJeff Kirsher 		gfar_write(&regs->rctrl, tempval);
3094ec21e2ecSJeff Kirsher 	}
3095ec21e2ecSJeff Kirsher 
3096ec21e2ecSJeff Kirsher 	if (dev->flags & IFF_ALLMULTI) {
3097ec21e2ecSJeff Kirsher 		/* Set the hash to rx all multicast frames */
3098ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr0, 0xffffffff);
3099ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr1, 0xffffffff);
3100ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr2, 0xffffffff);
3101ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr3, 0xffffffff);
3102ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr4, 0xffffffff);
3103ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr5, 0xffffffff);
3104ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr6, 0xffffffff);
3105ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr7, 0xffffffff);
3106ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr0, 0xffffffff);
3107ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr1, 0xffffffff);
3108ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr2, 0xffffffff);
3109ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr3, 0xffffffff);
3110ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr4, 0xffffffff);
3111ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr5, 0xffffffff);
3112ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr6, 0xffffffff);
3113ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr7, 0xffffffff);
3114ec21e2ecSJeff Kirsher 	} else {
3115ec21e2ecSJeff Kirsher 		int em_num;
3116ec21e2ecSJeff Kirsher 		int idx;
3117ec21e2ecSJeff Kirsher 
3118ec21e2ecSJeff Kirsher 		/* zero out the hash */
3119ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr0, 0x0);
3120ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr1, 0x0);
3121ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr2, 0x0);
3122ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr3, 0x0);
3123ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr4, 0x0);
3124ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr5, 0x0);
3125ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr6, 0x0);
3126ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr7, 0x0);
3127ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr0, 0x0);
3128ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr1, 0x0);
3129ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr2, 0x0);
3130ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr3, 0x0);
3131ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr4, 0x0);
3132ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr5, 0x0);
3133ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr6, 0x0);
3134ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr7, 0x0);
3135ec21e2ecSJeff Kirsher 
3136ec21e2ecSJeff Kirsher 		/* If we have extended hash tables, we need to
3137ec21e2ecSJeff Kirsher 		 * clear the exact match registers to prepare for
31380977f817SJan Ceuleers 		 * setting them
31390977f817SJan Ceuleers 		 */
3140ec21e2ecSJeff Kirsher 		if (priv->extended_hash) {
3141ec21e2ecSJeff Kirsher 			em_num = GFAR_EM_NUM + 1;
3142ec21e2ecSJeff Kirsher 			gfar_clear_exact_match(dev);
3143ec21e2ecSJeff Kirsher 			idx = 1;
3144ec21e2ecSJeff Kirsher 		} else {
3145ec21e2ecSJeff Kirsher 			idx = 0;
3146ec21e2ecSJeff Kirsher 			em_num = 0;
3147ec21e2ecSJeff Kirsher 		}
3148ec21e2ecSJeff Kirsher 
3149ec21e2ecSJeff Kirsher 		if (netdev_mc_empty(dev))
3150ec21e2ecSJeff Kirsher 			return;
3151ec21e2ecSJeff Kirsher 
3152ec21e2ecSJeff Kirsher 		/* Parse the list, and set the appropriate bits */
3153ec21e2ecSJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
3154ec21e2ecSJeff Kirsher 			if (idx < em_num) {
3155ec21e2ecSJeff Kirsher 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3156ec21e2ecSJeff Kirsher 				idx++;
3157ec21e2ecSJeff Kirsher 			} else
3158ec21e2ecSJeff Kirsher 				gfar_set_hash_for_addr(dev, ha->addr);
3159ec21e2ecSJeff Kirsher 		}
3160ec21e2ecSJeff Kirsher 	}
3161ec21e2ecSJeff Kirsher }
3162ec21e2ecSJeff Kirsher 
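/* Soft-reset the MAC and reapply frame length limits, Rx offloads,
 * address filtering and interrupt coalescing settings
 */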
31637d993c5fSArseny Solokha void gfar_mac_reset(struct gfar_private *priv)
31646ce29b0eSClaudiu Manoil {
31656ce29b0eSClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
31667d993c5fSArseny Solokha 	u32 tempval;
31676ce29b0eSClaudiu Manoil 
31687d993c5fSArseny Solokha 	/* Reset MAC layer */
31697d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
31706ce29b0eSClaudiu Manoil 
31717d993c5fSArseny Solokha 	/* We need to delay at least 3 TX clocks */
31727d993c5fSArseny Solokha 	udelay(3);
31736ce29b0eSClaudiu Manoil 
31747d993c5fSArseny Solokha 	/* the soft reset bit is not self-resetting, so we need to
31757d993c5fSArseny Solokha 	 * clear it before resuming normal operation
31766ce29b0eSClaudiu Manoil 	 */
31777d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, 0);
31786ce29b0eSClaudiu Manoil 
31797d993c5fSArseny Solokha 	udelay(3);
31806ce29b0eSClaudiu Manoil 
31817d993c5fSArseny Solokha 	gfar_rx_offload_en(priv);
31826ce29b0eSClaudiu Manoil 
31837d993c5fSArseny Solokha 	/* Initialize the max receive frame/buffer lengths */
31847d993c5fSArseny Solokha 	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
31857d993c5fSArseny Solokha 	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3186b4b67f26SScott Wood 
31877d993c5fSArseny Solokha 	/* Initialize the Minimum Frame Length Register */
31887d993c5fSArseny Solokha 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
318945b679c9SMatei Pavaluca 
31907d993c5fSArseny Solokha 	/* Initialize MACCFG2. */
31917d993c5fSArseny Solokha 	tempval = MACCFG2_INIT_SETTINGS;
319245b679c9SMatei Pavaluca 
31937d993c5fSArseny Solokha 	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
31947d993c5fSArseny Solokha 	 * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
31957d993c5fSArseny Solokha 	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
31967d993c5fSArseny Solokha 	 */
31977d993c5fSArseny Solokha 	if (gfar_has_errata(priv, GFAR_ERRATA_74))
31987d993c5fSArseny Solokha 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
319945b679c9SMatei Pavaluca 
32006ce29b0eSClaudiu Manoil 	gfar_write(&regs->maccfg2, tempval);
32016ce29b0eSClaudiu Manoil 
32027d993c5fSArseny Solokha 	/* Clear mac addr hash registers */
32037d993c5fSArseny Solokha 	gfar_write(&regs->igaddr0, 0);
32047d993c5fSArseny Solokha 	gfar_write(&regs->igaddr1, 0);
32057d993c5fSArseny Solokha 	gfar_write(&regs->igaddr2, 0);
32067d993c5fSArseny Solokha 	gfar_write(&regs->igaddr3, 0);
32077d993c5fSArseny Solokha 	gfar_write(&regs->igaddr4, 0);
32087d993c5fSArseny Solokha 	gfar_write(&regs->igaddr5, 0);
32097d993c5fSArseny Solokha 	gfar_write(&regs->igaddr6, 0);
32107d993c5fSArseny Solokha 	gfar_write(&regs->igaddr7, 0);
32116ce29b0eSClaudiu Manoil 
32127d993c5fSArseny Solokha 	gfar_write(&regs->gaddr0, 0);
32137d993c5fSArseny Solokha 	gfar_write(&regs->gaddr1, 0);
32147d993c5fSArseny Solokha 	gfar_write(&regs->gaddr2, 0);
32157d993c5fSArseny Solokha 	gfar_write(&regs->gaddr3, 0);
32167d993c5fSArseny Solokha 	gfar_write(&regs->gaddr4, 0);
32177d993c5fSArseny Solokha 	gfar_write(&regs->gaddr5, 0);
32187d993c5fSArseny Solokha 	gfar_write(&regs->gaddr6, 0);
32197d993c5fSArseny Solokha 	gfar_write(&regs->gaddr7, 0);
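	/* Both banks of hash registers (8 x 32 bits each) are wiped so
	 * no stale address filtering survives the reset;
	 * gfar_set_multi() below repopulates them from the current
	 * address lists.
	 */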
32207d993c5fSArseny Solokha 
32217d993c5fSArseny Solokha 	if (priv->extended_hash)
32227d993c5fSArseny Solokha 		gfar_clear_exact_match(priv->ndev);
32237d993c5fSArseny Solokha 
32247d993c5fSArseny Solokha 	gfar_mac_rx_config(priv);
32257d993c5fSArseny Solokha 
32267d993c5fSArseny Solokha 	gfar_mac_tx_config(priv);
32277d993c5fSArseny Solokha 
32287d993c5fSArseny Solokha 	gfar_set_mac_address(priv->ndev);
32297d993c5fSArseny Solokha 
32307d993c5fSArseny Solokha 	gfar_set_multi(priv->ndev);
32317d993c5fSArseny Solokha 
32327d993c5fSArseny Solokha 	/* clear ievent and imask before configuring coalescing */
32337d993c5fSArseny Solokha 	gfar_ints_disable(priv);
32347d993c5fSArseny Solokha 
32357d993c5fSArseny Solokha 	/* Configure the coalescing support */
32367d993c5fSArseny Solokha 	gfar_configure_coalescing_all(priv);
32377d993c5fSArseny Solokha }
32387d993c5fSArseny Solokha 
32397d993c5fSArseny Solokha static void gfar_hw_init(struct gfar_private *priv)
32407d993c5fSArseny Solokha {
32417d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
32427d993c5fSArseny Solokha 	u32 attrs;
32437d993c5fSArseny Solokha 
32447d993c5fSArseny Solokha 	/* Stop the DMA engine now, in case it was running before
32457d993c5fSArseny Solokha 	 * (The firmware could have used it, and left it running).
32467d993c5fSArseny Solokha 	 */
32477d993c5fSArseny Solokha 	gfar_halt(priv);
32487d993c5fSArseny Solokha 
32497d993c5fSArseny Solokha 	gfar_mac_reset(priv);
32507d993c5fSArseny Solokha 
32517d993c5fSArseny Solokha 	/* Zero out the RMON MIB registers if the device has them */
32527d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
32537d993c5fSArseny Solokha 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
32547d993c5fSArseny Solokha 
32557d993c5fSArseny Solokha 		/* Mask off the CAM interrupts */
32567d993c5fSArseny Solokha 		gfar_write(&regs->rmon.cam1, 0xffffffff);
32577d993c5fSArseny Solokha 		gfar_write(&regs->rmon.cam2, 0xffffffff);
32587d993c5fSArseny Solokha 	}
32597d993c5fSArseny Solokha 
32607d993c5fSArseny Solokha 	/* Initialize ECNTRL */
32617d993c5fSArseny Solokha 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
32627d993c5fSArseny Solokha 
32637d993c5fSArseny Solokha 	/* Set the extraction length and index */
32647d993c5fSArseny Solokha 	attrs = ATTRELI_EL(priv->rx_stash_size) |
32657d993c5fSArseny Solokha 		ATTRELI_EI(priv->rx_stash_index);
32667d993c5fSArseny Solokha 
32677d993c5fSArseny Solokha 	gfar_write(&regs->attreli, attrs);
32687d993c5fSArseny Solokha 
32697d993c5fSArseny Solokha 	/* Start with defaults, and add stashing
32707d993c5fSArseny Solokha 	 * depending on driver parameters
32717d993c5fSArseny Solokha 	 */
32727d993c5fSArseny Solokha 	attrs = ATTR_INIT_SETTINGS;
32737d993c5fSArseny Solokha 
32747d993c5fSArseny Solokha 	if (priv->bd_stash_en)
32757d993c5fSArseny Solokha 		attrs |= ATTR_BDSTASH;
32767d993c5fSArseny Solokha 
32777d993c5fSArseny Solokha 	if (priv->rx_stash_size != 0)
32787d993c5fSArseny Solokha 		attrs |= ATTR_BUFSTASH;
32797d993c5fSArseny Solokha 
32807d993c5fSArseny Solokha 	gfar_write(&regs->attr, attrs);
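	/* BDSTASH asks the eTSEC to stash buffer descriptors into the
	 * CPU cache as they are written back, and BUFSTASH does the
	 * same for the first rx_stash_size bytes of each received frame
	 * (located via the ATTRELI fields programmed above); both are
	 * cache-warming optimizations for the Rx hot path.
	 */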
32817d993c5fSArseny Solokha 
32827d993c5fSArseny Solokha 	/* FIFO configs */
32837d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
32847d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
32857d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
32867d993c5fSArseny Solokha 
32877d993c5fSArseny Solokha 	/* Program the interrupt steering regs, only for multi-group (MG) devices */
32887d993c5fSArseny Solokha 	if (priv->num_grps > 1)
32897d993c5fSArseny Solokha 		gfar_write_isrg(priv);
32907d993c5fSArseny Solokha }
32917d993c5fSArseny Solokha 
32927d993c5fSArseny Solokha static const struct net_device_ops gfar_netdev_ops = {
32937d993c5fSArseny Solokha 	.ndo_open = gfar_enet_open,
32947d993c5fSArseny Solokha 	.ndo_start_xmit = gfar_start_xmit,
32957d993c5fSArseny Solokha 	.ndo_stop = gfar_close,
32967d993c5fSArseny Solokha 	.ndo_change_mtu = gfar_change_mtu,
32977d993c5fSArseny Solokha 	.ndo_set_features = gfar_set_features,
32987d993c5fSArseny Solokha 	.ndo_set_rx_mode = gfar_set_multi,
32997d993c5fSArseny Solokha 	.ndo_tx_timeout = gfar_timeout,
33007d993c5fSArseny Solokha 	.ndo_do_ioctl = gfar_ioctl,
33017d993c5fSArseny Solokha 	.ndo_get_stats = gfar_get_stats,
33027d993c5fSArseny Solokha 	.ndo_change_carrier = fixed_phy_change_carrier,
33037d993c5fSArseny Solokha 	.ndo_set_mac_address = gfar_set_mac_addr,
33047d993c5fSArseny Solokha 	.ndo_validate_addr = eth_validate_addr,
33057d993c5fSArseny Solokha #ifdef CONFIG_NET_POLL_CONTROLLER
33067d993c5fSArseny Solokha 	.ndo_poll_controller = gfar_netpoll,
33077d993c5fSArseny Solokha #endif
33087d993c5fSArseny Solokha };
33097d993c5fSArseny Solokha 
33107d993c5fSArseny Solokha /* Set up the ethernet device structure, private data,
33117d993c5fSArseny Solokha  * and anything else we need before we start
33127d993c5fSArseny Solokha  */
33137d993c5fSArseny Solokha static int gfar_probe(struct platform_device *ofdev)
33147d993c5fSArseny Solokha {
33157d993c5fSArseny Solokha 	struct device_node *np = ofdev->dev.of_node;
33167d993c5fSArseny Solokha 	struct net_device *dev = NULL;
33177d993c5fSArseny Solokha 	struct gfar_private *priv = NULL;
33187d993c5fSArseny Solokha 	int err = 0, i;
33197d993c5fSArseny Solokha 
33207d993c5fSArseny Solokha 	err = gfar_of_init(ofdev, &dev);
33217d993c5fSArseny Solokha 
33227d993c5fSArseny Solokha 	if (err)
33237d993c5fSArseny Solokha 		return err;
33247d993c5fSArseny Solokha 
33257d993c5fSArseny Solokha 	priv = netdev_priv(dev);
33267d993c5fSArseny Solokha 	priv->ndev = dev;
33277d993c5fSArseny Solokha 	priv->ofdev = ofdev;
33287d993c5fSArseny Solokha 	priv->dev = &ofdev->dev;
33297d993c5fSArseny Solokha 	SET_NETDEV_DEV(dev, &ofdev->dev);
33307d993c5fSArseny Solokha 
33317d993c5fSArseny Solokha 	INIT_WORK(&priv->reset_task, gfar_reset_task);
33327d993c5fSArseny Solokha 
33337d993c5fSArseny Solokha 	platform_set_drvdata(ofdev, priv);
33347d993c5fSArseny Solokha 
33357d993c5fSArseny Solokha 	gfar_detect_errata(priv);
33367d993c5fSArseny Solokha 
33377d993c5fSArseny Solokha 	/* Set the dev->base_addr to the gfar reg region */
33387d993c5fSArseny Solokha 	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
33397d993c5fSArseny Solokha 
33407d993c5fSArseny Solokha 	/* Fill in the dev structure */
33417d993c5fSArseny Solokha 	dev->watchdog_timeo = TX_TIMEOUT;
33427d993c5fSArseny Solokha 	/* MTU range: 50 - 9586 */
33437d993c5fSArseny Solokha 	dev->mtu = 1500;
33447d993c5fSArseny Solokha 	dev->min_mtu = 50;
33457d993c5fSArseny Solokha 	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
33467d993c5fSArseny Solokha 	dev->netdev_ops = &gfar_netdev_ops;
33477d993c5fSArseny Solokha 	dev->ethtool_ops = &gfar_ethtool_ops;
33487d993c5fSArseny Solokha 
33497d993c5fSArseny Solokha 	/* Register NAPI: one Rx and one Tx NAPI context per interrupt group */
33507d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
33517d993c5fSArseny Solokha 		if (priv->poll_mode == GFAR_SQ_POLLING) {
33527d993c5fSArseny Solokha 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
33537d993c5fSArseny Solokha 				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
33547d993c5fSArseny Solokha 			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
33557d993c5fSArseny Solokha 				       gfar_poll_tx_sq, 2);
33567d993c5fSArseny Solokha 		} else {
33577d993c5fSArseny Solokha 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
33587d993c5fSArseny Solokha 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
33597d993c5fSArseny Solokha 			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
33607d993c5fSArseny Solokha 				       gfar_poll_tx, 2);
33617d993c5fSArseny Solokha 		}
33627d993c5fSArseny Solokha 	}
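	/* In GFAR_SQ_POLLING mode each group's NAPI contexts service a
	 * single Rx/Tx queue pair (the *_sq handlers); otherwise the
	 * poll routines walk every queue owned by the group.  Tx NAPI
	 * gets a small weight (2) since Tx completion is typically much
	 * cheaper per descriptor than Rx processing.
	 */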
33637d993c5fSArseny Solokha 
33647d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
33657d993c5fSArseny Solokha 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
33667d993c5fSArseny Solokha 				   NETIF_F_RXCSUM;
33677d993c5fSArseny Solokha 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
33687d993c5fSArseny Solokha 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
33697d993c5fSArseny Solokha 	}
33707d993c5fSArseny Solokha 
33717d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
33727d993c5fSArseny Solokha 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
33737d993c5fSArseny Solokha 				    NETIF_F_HW_VLAN_CTAG_RX;
33747d993c5fSArseny Solokha 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
33757d993c5fSArseny Solokha 	}
33767d993c5fSArseny Solokha 
33777d993c5fSArseny Solokha 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
33787d993c5fSArseny Solokha 
33797d993c5fSArseny Solokha 	gfar_init_addr_hash_table(priv);
33807d993c5fSArseny Solokha 
33817d993c5fSArseny Solokha 	/* Insert receive time stamps into the padding alignment bytes,
33827d993c5fSArseny Solokha 	 * plus 2 bytes of padding to ensure CPU alignment.
33837d993c5fSArseny Solokha 	 */
33847d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
33857d993c5fSArseny Solokha 		priv->padding = 8 + DEFAULT_PADDING;
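	/* (8 bytes make room for the hardware-inserted Rx timestamp,
	 *  DEFAULT_PADDING for the CPU alignment mentioned above)
	 */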
33867d993c5fSArseny Solokha 
33877d993c5fSArseny Solokha 	if (dev->features & NETIF_F_IP_CSUM ||
33887d993c5fSArseny Solokha 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3389d6a076d6SClaudiu Manoil 		dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
33907d993c5fSArseny Solokha 
33917d993c5fSArseny Solokha 	/* Initializing some of the rx/tx queue level parameters */
33927d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
33937d993c5fSArseny Solokha 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
33947d993c5fSArseny Solokha 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
33957d993c5fSArseny Solokha 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
33967d993c5fSArseny Solokha 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
33977d993c5fSArseny Solokha 	}
33987d993c5fSArseny Solokha 
33997d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
34007d993c5fSArseny Solokha 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
34017d993c5fSArseny Solokha 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
34027d993c5fSArseny Solokha 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
34037d993c5fSArseny Solokha 	}
34047d993c5fSArseny Solokha 
34057d993c5fSArseny Solokha 	/* Always enable rx filer if available */
34067d993c5fSArseny Solokha 	priv->rx_filer_enable =
34077d993c5fSArseny Solokha 	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
34087d993c5fSArseny Solokha 	/* Enable all message levels up to and including NETIF_MSG_IFUP */
34097d993c5fSArseny Solokha 	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
34107d993c5fSArseny Solokha 	/* use priority h/w tx queue scheduling for single queue devices */
34117d993c5fSArseny Solokha 	if (priv->num_tx_queues == 1)
34127d993c5fSArseny Solokha 		priv->prio_sched_en = 1;
34137d993c5fSArseny Solokha 
34147d993c5fSArseny Solokha 	set_bit(GFAR_DOWN, &priv->state);
34157d993c5fSArseny Solokha 
34167d993c5fSArseny Solokha 	gfar_hw_init(priv);
34177d993c5fSArseny Solokha 
34187d993c5fSArseny Solokha 	/* Carrier starts down, phylib will bring it up */
34197d993c5fSArseny Solokha 	netif_carrier_off(dev);
34207d993c5fSArseny Solokha 
34217d993c5fSArseny Solokha 	err = register_netdev(dev);
34227d993c5fSArseny Solokha 
34237d993c5fSArseny Solokha 	if (err) {
34247d993c5fSArseny Solokha 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
34257d993c5fSArseny Solokha 		goto register_fail;
34267d993c5fSArseny Solokha 	}
34277d993c5fSArseny Solokha 
34287d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
34297d993c5fSArseny Solokha 		priv->wol_supported |= GFAR_WOL_MAGIC;
34307d993c5fSArseny Solokha 
34317d993c5fSArseny Solokha 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
34327d993c5fSArseny Solokha 	    priv->rx_filer_enable)
34337d993c5fSArseny Solokha 		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
34347d993c5fSArseny Solokha 
34357d993c5fSArseny Solokha 	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
34367d993c5fSArseny Solokha 
34377d993c5fSArseny Solokha 	/* fill out IRQ number and name fields */
34387d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
34397d993c5fSArseny Solokha 		struct gfar_priv_grp *grp = &priv->gfargrp[i];
34407d993c5fSArseny Solokha 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
34417d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
34427d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_tx");
34437d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
34447d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_rx");
34457d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
34467d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_er");
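			/* yielding e.g. "eth0_g0_tx", "eth0_g0_rx" and
			 * "eth0_g0_er" for group 0 of a device "eth0"
			 */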
34477d993c5fSArseny Solokha 		} else
34487d993c5fSArseny Solokha 			strcpy(gfar_irq(grp, TX)->name, dev->name);
34497d993c5fSArseny Solokha 	}
34507d993c5fSArseny Solokha 
34517d993c5fSArseny Solokha 	/* Initialize the filer table */
34527d993c5fSArseny Solokha 	gfar_init_filer_table(priv);
34537d993c5fSArseny Solokha 
34547d993c5fSArseny Solokha 	/* Print out the device info */
34557d993c5fSArseny Solokha 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
34567d993c5fSArseny Solokha 
34577d993c5fSArseny Solokha 	/* Even more device info helps when determining which kernel
34587d993c5fSArseny Solokha 	 * produced a given set of benchmarks.
34597d993c5fSArseny Solokha 	 */
34607d993c5fSArseny Solokha 	netdev_info(dev, "Running with NAPI enabled\n");
34617d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++)
34627d993c5fSArseny Solokha 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
34637d993c5fSArseny Solokha 			    i, priv->rx_queue[i]->rx_ring_size);
34647d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++)
34657d993c5fSArseny Solokha 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
34667d993c5fSArseny Solokha 			    i, priv->tx_queue[i]->tx_ring_size);
34677d993c5fSArseny Solokha 
34687d993c5fSArseny Solokha 	return 0;
34697d993c5fSArseny Solokha 
34707d993c5fSArseny Solokha register_fail:
34717d993c5fSArseny Solokha 	if (of_phy_is_fixed_link(np))
34727d993c5fSArseny Solokha 		of_phy_deregister_fixed_link(np);
34737d993c5fSArseny Solokha 	unmap_group_regs(priv);
34747d993c5fSArseny Solokha 	gfar_free_rx_queues(priv);
34757d993c5fSArseny Solokha 	gfar_free_tx_queues(priv);
34767d993c5fSArseny Solokha 	of_node_put(priv->phy_node);
34777d993c5fSArseny Solokha 	of_node_put(priv->tbi_node);
34787d993c5fSArseny Solokha 	free_gfar_dev(priv);
34797d993c5fSArseny Solokha 	return err;
34807d993c5fSArseny Solokha }
34817d993c5fSArseny Solokha 
34827d993c5fSArseny Solokha static int gfar_remove(struct platform_device *ofdev)
34837d993c5fSArseny Solokha {
34847d993c5fSArseny Solokha 	struct gfar_private *priv = platform_get_drvdata(ofdev);
34857d993c5fSArseny Solokha 	struct device_node *np = ofdev->dev.of_node;
34867d993c5fSArseny Solokha 
34877d993c5fSArseny Solokha 	of_node_put(priv->phy_node);
34887d993c5fSArseny Solokha 	of_node_put(priv->tbi_node);
34897d993c5fSArseny Solokha 
34907d993c5fSArseny Solokha 	unregister_netdev(priv->ndev);
34917d993c5fSArseny Solokha 
34927d993c5fSArseny Solokha 	if (of_phy_is_fixed_link(np))
34937d993c5fSArseny Solokha 		of_phy_deregister_fixed_link(np);
34947d993c5fSArseny Solokha 
34957d993c5fSArseny Solokha 	unmap_group_regs(priv);
34967d993c5fSArseny Solokha 	gfar_free_rx_queues(priv);
34977d993c5fSArseny Solokha 	gfar_free_tx_queues(priv);
34987d993c5fSArseny Solokha 	free_gfar_dev(priv);
34997d993c5fSArseny Solokha 
35007d993c5fSArseny Solokha 	return 0;
35017d993c5fSArseny Solokha }
35027d993c5fSArseny Solokha 
35037d993c5fSArseny Solokha #ifdef CONFIG_PM
35047d993c5fSArseny Solokha 
35057d993c5fSArseny Solokha static void __gfar_filer_disable(struct gfar_private *priv)
35067d993c5fSArseny Solokha {
35077d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
35087d993c5fSArseny Solokha 	u32 temp;
35097d993c5fSArseny Solokha 
35107d993c5fSArseny Solokha 	temp = gfar_read(&regs->rctrl);
35117d993c5fSArseny Solokha 	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
35127d993c5fSArseny Solokha 	gfar_write(&regs->rctrl, temp);
35137d993c5fSArseny Solokha }
35147d993c5fSArseny Solokha 
35157d993c5fSArseny Solokha static void __gfar_filer_enable(struct gfar_private *priv)
35167d993c5fSArseny Solokha {
35177d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
35187d993c5fSArseny Solokha 	u32 temp;
35197d993c5fSArseny Solokha 
35207d993c5fSArseny Solokha 	temp = gfar_read(&regs->rctrl);
35217d993c5fSArseny Solokha 	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
35227d993c5fSArseny Solokha 	gfar_write(&regs->rctrl, temp);
35237d993c5fSArseny Solokha }
35247d993c5fSArseny Solokha 
35257d993c5fSArseny Solokha /* Filer rules implementing Wake-on-LAN (WoL) capabilities */
35267d993c5fSArseny Solokha static void gfar_filer_config_wol(struct gfar_private *priv)
35277d993c5fSArseny Solokha {
35287d993c5fSArseny Solokha 	unsigned int i;
35297d993c5fSArseny Solokha 	u32 rqfcr;
35307d993c5fSArseny Solokha 
35317d993c5fSArseny Solokha 	__gfar_filer_disable(priv);
35327d993c5fSArseny Solokha 
35337d993c5fSArseny Solokha 	/* clear the filer table, reject any packet by default */
35347d993c5fSArseny Solokha 	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
35357d993c5fSArseny Solokha 	for (i = 0; i <= MAX_FILER_IDX; i++)
35367d993c5fSArseny Solokha 		gfar_write_filer(priv, i, rqfcr, 0);
35377d993c5fSArseny Solokha 
35387d993c5fSArseny Solokha 	i = 0;
35397d993c5fSArseny Solokha 	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
35407d993c5fSArseny Solokha 		/* unicast packet, accept it */
35417d993c5fSArseny Solokha 		struct net_device *ndev = priv->ndev;
35427d993c5fSArseny Solokha 		/* get the default rx queue index */
35437d993c5fSArseny Solokha 		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
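		/* The 48-bit station address is matched in two 24-bit
		 * halves: a DAH rule for MAC bytes 0-2 chained by
		 * RQFCR_AND to a DAL rule for bytes 3-5, so the filer
		 * GPI (the wake-up event) fires only on a full match.
		 * The target Rx queue index sits in the RQFCR queue
		 * field at bit 10.
		 */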
35447d993c5fSArseny Solokha 		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
35457d993c5fSArseny Solokha 				    (ndev->dev_addr[1] << 8) |
35467d993c5fSArseny Solokha 				     ndev->dev_addr[2];
35477d993c5fSArseny Solokha 
35487d993c5fSArseny Solokha 		rqfcr = (qindex << 10) | RQFCR_AND |
35497d993c5fSArseny Solokha 			RQFCR_CMP_EXACT | RQFCR_PID_DAH;
35507d993c5fSArseny Solokha 
35517d993c5fSArseny Solokha 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
35527d993c5fSArseny Solokha 
35537d993c5fSArseny Solokha 		dest_mac_addr = (ndev->dev_addr[3] << 16) |
35547d993c5fSArseny Solokha 				(ndev->dev_addr[4] << 8) |
35557d993c5fSArseny Solokha 				 ndev->dev_addr[5];
35567d993c5fSArseny Solokha 		rqfcr = (qindex << 10) | RQFCR_GPI |
35577d993c5fSArseny Solokha 			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
35587d993c5fSArseny Solokha 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
35597d993c5fSArseny Solokha 	}
35607d993c5fSArseny Solokha 
35617d993c5fSArseny Solokha 	__gfar_filer_enable(priv);
35627d993c5fSArseny Solokha }
35637d993c5fSArseny Solokha 
35647d993c5fSArseny Solokha static void gfar_filer_restore_table(struct gfar_private *priv)
35657d993c5fSArseny Solokha {
35667d993c5fSArseny Solokha 	u32 rqfcr, rqfpr;
35677d993c5fSArseny Solokha 	unsigned int i;
35687d993c5fSArseny Solokha 
35697d993c5fSArseny Solokha 	__gfar_filer_disable(priv);
35707d993c5fSArseny Solokha 
35717d993c5fSArseny Solokha 	for (i = 0; i <= MAX_FILER_IDX; i++) {
35727d993c5fSArseny Solokha 		rqfcr = priv->ftp_rqfcr[i];
35737d993c5fSArseny Solokha 		rqfpr = priv->ftp_rqfpr[i];
35747d993c5fSArseny Solokha 		gfar_write_filer(priv, i, rqfcr, rqfpr);
35757d993c5fSArseny Solokha 	}
35767d993c5fSArseny Solokha 
35777d993c5fSArseny Solokha 	__gfar_filer_enable(priv);
35787d993c5fSArseny Solokha }
35797d993c5fSArseny Solokha 
35807d993c5fSArseny Solokha /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
35817d993c5fSArseny Solokha static void gfar_start_wol_filer(struct gfar_private *priv)
35827d993c5fSArseny Solokha {
35837d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
35847d993c5fSArseny Solokha 	u32 tempval;
35857d993c5fSArseny Solokha 	int i = 0;
35867d993c5fSArseny Solokha 
35877d993c5fSArseny Solokha 	/* Enable Rx hw queues */
35887d993c5fSArseny Solokha 	gfar_write(&regs->rqueue, priv->rqueue);
35897d993c5fSArseny Solokha 
35907d993c5fSArseny Solokha 	/* Initialize DMACTRL to have WWR and WOP */
35917d993c5fSArseny Solokha 	tempval = gfar_read(&regs->dmactrl);
35927d993c5fSArseny Solokha 	tempval |= DMACTRL_INIT_SETTINGS;
35937d993c5fSArseny Solokha 	gfar_write(&regs->dmactrl, tempval);
35947d993c5fSArseny Solokha 
35957d993c5fSArseny Solokha 	/* Make sure we aren't stopped: clear the graceful Rx stop (GRS) request */
35967d993c5fSArseny Solokha 	tempval = gfar_read(&regs->dmactrl);
35977d993c5fSArseny Solokha 	tempval &= ~DMACTRL_GRS;
35987d993c5fSArseny Solokha 	gfar_write(&regs->dmactrl, tempval);
35997d993c5fSArseny Solokha 
36007d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
36017d993c5fSArseny Solokha 		regs = priv->gfargrp[i].regs;
36027d993c5fSArseny Solokha 		/* Clear RHLT, so that the DMA starts polling now */
36037d993c5fSArseny Solokha 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
36047d993c5fSArseny Solokha 		/* enable only the Filer General Purpose Interrupt; all else stays masked */
36057d993c5fSArseny Solokha 		gfar_write(&regs->imask, IMASK_FGPI);
36067d993c5fSArseny Solokha 	}
36077d993c5fSArseny Solokha 
36087d993c5fSArseny Solokha 	/* Enable Rx DMA */
36097d993c5fSArseny Solokha 	tempval = gfar_read(&regs->maccfg1);
36107d993c5fSArseny Solokha 	tempval |= MACCFG1_RX_EN;
36117d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, tempval);
36127d993c5fSArseny Solokha }
36137d993c5fSArseny Solokha 
36147d993c5fSArseny Solokha static int gfar_suspend(struct device *dev)
36157d993c5fSArseny Solokha {
36167d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
36177d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
36187d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
36197d993c5fSArseny Solokha 	u32 tempval;
36207d993c5fSArseny Solokha 	u16 wol = priv->wol_opts;
36217d993c5fSArseny Solokha 
36227d993c5fSArseny Solokha 	if (!netif_running(ndev))
36237d993c5fSArseny Solokha 		return 0;
36247d993c5fSArseny Solokha 
36257d993c5fSArseny Solokha 	disable_napi(priv);
36267d993c5fSArseny Solokha 	netif_tx_lock(ndev);
36277d993c5fSArseny Solokha 	netif_device_detach(ndev);
36287d993c5fSArseny Solokha 	netif_tx_unlock(ndev);
36297d993c5fSArseny Solokha 
36307d993c5fSArseny Solokha 	gfar_halt(priv);
36317d993c5fSArseny Solokha 
36327d993c5fSArseny Solokha 	if (wol & GFAR_WOL_MAGIC) {
36337d993c5fSArseny Solokha 		/* Enable interrupt on Magic Packet */
36347d993c5fSArseny Solokha 		gfar_write(&regs->imask, IMASK_MAG);
36357d993c5fSArseny Solokha 
36367d993c5fSArseny Solokha 		/* Enable Magic Packet mode */
36377d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg2);
36387d993c5fSArseny Solokha 		tempval |= MACCFG2_MPEN;
36397d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
36407d993c5fSArseny Solokha 
36417d993c5fSArseny Solokha 		/* re-enable the Rx block */
36427d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg1);
36437d993c5fSArseny Solokha 		tempval |= MACCFG1_RX_EN;
36447d993c5fSArseny Solokha 		gfar_write(&regs->maccfg1, tempval);
36457d993c5fSArseny Solokha 
36467d993c5fSArseny Solokha 	} else if (wol & GFAR_WOL_FILER_UCAST) {
36477d993c5fSArseny Solokha 		gfar_filer_config_wol(priv);
36487d993c5fSArseny Solokha 		gfar_start_wol_filer(priv);
36497d993c5fSArseny Solokha 
36507d993c5fSArseny Solokha 	} else {
36517d993c5fSArseny Solokha 		phy_stop(ndev->phydev);
36527d993c5fSArseny Solokha 	}
36537d993c5fSArseny Solokha 
36547d993c5fSArseny Solokha 	return 0;
36557d993c5fSArseny Solokha }
36567d993c5fSArseny Solokha 
36577d993c5fSArseny Solokha static int gfar_resume(struct device *dev)
36587d993c5fSArseny Solokha {
36597d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
36607d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
36617d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
36627d993c5fSArseny Solokha 	u32 tempval;
36637d993c5fSArseny Solokha 	u16 wol = priv->wol_opts;
36647d993c5fSArseny Solokha 
36657d993c5fSArseny Solokha 	if (!netif_running(ndev))
36667d993c5fSArseny Solokha 		return 0;
36677d993c5fSArseny Solokha 
36687d993c5fSArseny Solokha 	if (wol & GFAR_WOL_MAGIC) {
36697d993c5fSArseny Solokha 		/* Disable Magic Packet mode */
36707d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg2);
36717d993c5fSArseny Solokha 		tempval &= ~MACCFG2_MPEN;
36727d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
36737d993c5fSArseny Solokha 
36747d993c5fSArseny Solokha 	} else if (wol & GFAR_WOL_FILER_UCAST) {
36757d993c5fSArseny Solokha 		/* need to stop rx only, tx is already down */
36767d993c5fSArseny Solokha 		gfar_halt(priv);
36777d993c5fSArseny Solokha 		gfar_filer_restore_table(priv);
36787d993c5fSArseny Solokha 
36797d993c5fSArseny Solokha 	} else {
36807d993c5fSArseny Solokha 		phy_start(ndev->phydev);
36817d993c5fSArseny Solokha 	}
36827d993c5fSArseny Solokha 
36837d993c5fSArseny Solokha 	gfar_start(priv);
36847d993c5fSArseny Solokha 
36857d993c5fSArseny Solokha 	netif_device_attach(ndev);
36867d993c5fSArseny Solokha 	enable_napi(priv);
36877d993c5fSArseny Solokha 
36887d993c5fSArseny Solokha 	return 0;
36897d993c5fSArseny Solokha }
36907d993c5fSArseny Solokha 
36917d993c5fSArseny Solokha static int gfar_restore(struct device *dev)
36927d993c5fSArseny Solokha {
36937d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
36947d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
36957d993c5fSArseny Solokha 
36967d993c5fSArseny Solokha 	if (!netif_running(ndev)) {
36977d993c5fSArseny Solokha 		netif_device_attach(ndev);
36987d993c5fSArseny Solokha 
36997d993c5fSArseny Solokha 		return 0;
37007d993c5fSArseny Solokha 	}
37017d993c5fSArseny Solokha 
37027d993c5fSArseny Solokha 	gfar_init_bds(ndev);
37037d993c5fSArseny Solokha 
37047d993c5fSArseny Solokha 	gfar_mac_reset(priv);
37057d993c5fSArseny Solokha 
37067d993c5fSArseny Solokha 	gfar_init_tx_rx_base(priv);
37077d993c5fSArseny Solokha 
37087d993c5fSArseny Solokha 	gfar_start(priv);
37097d993c5fSArseny Solokha 
37106ce29b0eSClaudiu Manoil 	priv->oldlink = 0;
37116ce29b0eSClaudiu Manoil 	priv->oldspeed = 0;
37126ce29b0eSClaudiu Manoil 	priv->oldduplex = -1;
37137d993c5fSArseny Solokha 
37147d993c5fSArseny Solokha 	if (ndev->phydev)
37157d993c5fSArseny Solokha 		phy_start(ndev->phydev);
37167d993c5fSArseny Solokha 
37177d993c5fSArseny Solokha 	netif_device_attach(ndev);
37187d993c5fSArseny Solokha 	enable_napi(priv);
37197d993c5fSArseny Solokha 
37207d993c5fSArseny Solokha 	return 0;
37216ce29b0eSClaudiu Manoil }
37226ce29b0eSClaudiu Manoil 
37237d993c5fSArseny Solokha static const struct dev_pm_ops gfar_pm_ops = {
37247d993c5fSArseny Solokha 	.suspend = gfar_suspend,
37257d993c5fSArseny Solokha 	.resume = gfar_resume,
37267d993c5fSArseny Solokha 	.freeze = gfar_suspend,
37277d993c5fSArseny Solokha 	.thaw = gfar_resume,
37287d993c5fSArseny Solokha 	.restore = gfar_restore,
37297d993c5fSArseny Solokha };
37307d993c5fSArseny Solokha 
37317d993c5fSArseny Solokha #define GFAR_PM_OPS (&gfar_pm_ops)
37327d993c5fSArseny Solokha 
37337d993c5fSArseny Solokha #else
37347d993c5fSArseny Solokha 
37357d993c5fSArseny Solokha #define GFAR_PM_OPS NULL
37367d993c5fSArseny Solokha 
37377d993c5fSArseny Solokha #endif
37386ce29b0eSClaudiu Manoil 
373994e5a2a8SFabian Frederick static const struct of_device_id gfar_match[] =
3740ec21e2ecSJeff Kirsher {
3741ec21e2ecSJeff Kirsher 	{
3742ec21e2ecSJeff Kirsher 		.type = "network",
3743ec21e2ecSJeff Kirsher 		.compatible = "gianfar",
3744ec21e2ecSJeff Kirsher 	},
3745ec21e2ecSJeff Kirsher 	{
3746ec21e2ecSJeff Kirsher 		.compatible = "fsl,etsec2",
3747ec21e2ecSJeff Kirsher 	},
3748ec21e2ecSJeff Kirsher 	{},
3749ec21e2ecSJeff Kirsher };
3750ec21e2ecSJeff Kirsher MODULE_DEVICE_TABLE(of, gfar_match);
3751ec21e2ecSJeff Kirsher 
3752ec21e2ecSJeff Kirsher /* Structure for a device driver */
3753ec21e2ecSJeff Kirsher static struct platform_driver gfar_driver = {
3754ec21e2ecSJeff Kirsher 	.driver = {
3755ec21e2ecSJeff Kirsher 		.name = "fsl-gianfar",
3756ec21e2ecSJeff Kirsher 		.pm = GFAR_PM_OPS,
3757ec21e2ecSJeff Kirsher 		.of_match_table = gfar_match,
3758ec21e2ecSJeff Kirsher 	},
3759ec21e2ecSJeff Kirsher 	.probe = gfar_probe,
3760ec21e2ecSJeff Kirsher 	.remove = gfar_remove,
3761ec21e2ecSJeff Kirsher };
3762ec21e2ecSJeff Kirsher 
3763db62f684SAxel Lin module_platform_driver(gfar_driver);
3764