xref: /openbmc/linux/drivers/net/ethernet/freescale/gianfar.c (revision ff021f22ea8f1388ced048243c6e06ca5cfbd62a)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
20977f817SJan Ceuleers /* drivers/net/ethernet/freescale/gianfar.c
3ec21e2ecSJeff Kirsher  *
4ec21e2ecSJeff Kirsher  * Gianfar Ethernet Driver
5ec21e2ecSJeff Kirsher  * This driver is designed for the non-CPM ethernet controllers
6ec21e2ecSJeff Kirsher  * on the 85xx and 83xx families of integrated processors
7ec21e2ecSJeff Kirsher  * Based on 8260_io/fcc_enet.c
8ec21e2ecSJeff Kirsher  *
9ec21e2ecSJeff Kirsher  * Author: Andy Fleming
10ec21e2ecSJeff Kirsher  * Maintainer: Kumar Gala
11ec21e2ecSJeff Kirsher  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12ec21e2ecSJeff Kirsher  *
1320862788SClaudiu Manoil  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
14ec21e2ecSJeff Kirsher  * Copyright 2007 MontaVista Software, Inc.
15ec21e2ecSJeff Kirsher  *
16ec21e2ecSJeff Kirsher  *  Gianfar:  AKA Lambda Draconis, "Dragon"
17ec21e2ecSJeff Kirsher  *  RA 11 31 24.2
18ec21e2ecSJeff Kirsher  *  Dec +69 19 52
19ec21e2ecSJeff Kirsher  *  V 3.84
20ec21e2ecSJeff Kirsher  *  B-V +1.62
21ec21e2ecSJeff Kirsher  *
22ec21e2ecSJeff Kirsher  *  Theory of operation
23ec21e2ecSJeff Kirsher  *
24ec21e2ecSJeff Kirsher  *  The driver is initialized through of_device. Configuration information
25ec21e2ecSJeff Kirsher  *  is therefore conveyed through an OF-style device tree.
26ec21e2ecSJeff Kirsher  *
27ec21e2ecSJeff Kirsher  *  The Gianfar Ethernet Controller uses a ring of buffer
28ec21e2ecSJeff Kirsher  *  descriptors.  The beginning is indicated by a register
29ec21e2ecSJeff Kirsher  *  pointing to the physical address of the start of the ring.
30ec21e2ecSJeff Kirsher  *  The end is determined by a "wrap" bit being set in the
31ec21e2ecSJeff Kirsher  *  last descriptor of the ring.
32ec21e2ecSJeff Kirsher  *
33ec21e2ecSJeff Kirsher  *  When a packet is received, the RXF bit in the
34ec21e2ecSJeff Kirsher  *  IEVENT register is set, triggering an interrupt when the
35ec21e2ecSJeff Kirsher  *  corresponding bit in the IMASK register is also set (if
36ec21e2ecSJeff Kirsher  *  interrupt coalescing is active, then the interrupt may not
37ec21e2ecSJeff Kirsher  *  happen immediately, but will wait until either a set number
38ec21e2ecSJeff Kirsher  *  of frames or amount of time have passed).  In NAPI, the
3938ec21e2ecSJeff Kirsher  * of frames or a set amount of time has passed).  In NAPI, the
40ec21e2ecSJeff Kirsher  *  exit. This method will start at the last known empty
41ec21e2ecSJeff Kirsher  *  descriptor, and process every subsequent descriptor until there
42ec21e2ecSJeff Kirsher  *  are none left with data (NAPI will stop after a set number of
43ec21e2ecSJeff Kirsher  *  packets to give time to other tasks, but will eventually
44ec21e2ecSJeff Kirsher  *  process all the packets).  The data arrives inside a
45ec21e2ecSJeff Kirsher  *  pre-allocated skb, and so after the skb is passed up to the
46ec21e2ecSJeff Kirsher  *  stack, a new skb must be allocated, and the address field in
47ec21e2ecSJeff Kirsher  *  the buffer descriptor must be updated to indicate this new
48ec21e2ecSJeff Kirsher  *  skb.
49ec21e2ecSJeff Kirsher  *
50ec21e2ecSJeff Kirsher  *  When the kernel requests that a packet be transmitted, the
51ec21e2ecSJeff Kirsher  *  driver starts where it left off last time, and points the
52ec21e2ecSJeff Kirsher  *  descriptor at the buffer which was passed in.  The driver
53ec21e2ecSJeff Kirsher  *  then informs the DMA engine that there are packets ready to
54ec21e2ecSJeff Kirsher  *  be transmitted.  Once the controller is finished transmitting
55ec21e2ecSJeff Kirsher  *  the packet, an interrupt may be triggered (under the same
56ec21e2ecSJeff Kirsher  *  conditions as for reception, but depending on the TXF bit).
57ec21e2ecSJeff Kirsher  *  The driver then cleans up the buffer.
58ec21e2ecSJeff Kirsher  */
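/* Illustrative sketch (not part of the driver): the receive path described
 * above amounts to a bounded walk over the buffer descriptor ring from the
 * NAPI poll routine.  The helpers named below (bd_is_empty, process_rx_bd,
 * next_rx_bd) are invented here purely for illustration:
 *
 *	while (budget > 0 && !bd_is_empty(bdp)) {
 *		process_rx_bd(bdp);          // pass skb up, attach a fresh buffer
 *		bdp = next_rx_bd(bdp, ring); // wraps at the RXBD_WRAP descriptor
 *		budget--;
 *	}
 */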
59ec21e2ecSJeff Kirsher 
60ec21e2ecSJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61ec21e2ecSJeff Kirsher #define DEBUG
62ec21e2ecSJeff Kirsher 
63ec21e2ecSJeff Kirsher #include <linux/kernel.h>
64ec21e2ecSJeff Kirsher #include <linux/string.h>
65ec21e2ecSJeff Kirsher #include <linux/errno.h>
66ec21e2ecSJeff Kirsher #include <linux/unistd.h>
67ec21e2ecSJeff Kirsher #include <linux/slab.h>
68ec21e2ecSJeff Kirsher #include <linux/interrupt.h>
69ec21e2ecSJeff Kirsher #include <linux/delay.h>
70ec21e2ecSJeff Kirsher #include <linux/netdevice.h>
71ec21e2ecSJeff Kirsher #include <linux/etherdevice.h>
72ec21e2ecSJeff Kirsher #include <linux/skbuff.h>
73ec21e2ecSJeff Kirsher #include <linux/if_vlan.h>
74ec21e2ecSJeff Kirsher #include <linux/spinlock.h>
75ec21e2ecSJeff Kirsher #include <linux/mm.h>
765af50730SRob Herring #include <linux/of_address.h>
775af50730SRob Herring #include <linux/of_irq.h>
78ec21e2ecSJeff Kirsher #include <linux/of_mdio.h>
79ec21e2ecSJeff Kirsher #include <linux/of_platform.h>
80ec21e2ecSJeff Kirsher #include <linux/ip.h>
81ec21e2ecSJeff Kirsher #include <linux/tcp.h>
82ec21e2ecSJeff Kirsher #include <linux/udp.h>
83ec21e2ecSJeff Kirsher #include <linux/in.h>
84ec21e2ecSJeff Kirsher #include <linux/net_tstamp.h>
85ec21e2ecSJeff Kirsher 
86ec21e2ecSJeff Kirsher #include <asm/io.h>
87d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
88ec21e2ecSJeff Kirsher #include <asm/reg.h>
892969b1f7SClaudiu Manoil #include <asm/mpc85xx.h>
90d6ef0bccSClaudiu Manoil #endif
91ec21e2ecSJeff Kirsher #include <asm/irq.h>
927c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
93ec21e2ecSJeff Kirsher #include <linux/module.h>
94ec21e2ecSJeff Kirsher #include <linux/dma-mapping.h>
95ec21e2ecSJeff Kirsher #include <linux/crc32.h>
96ec21e2ecSJeff Kirsher #include <linux/mii.h>
97ec21e2ecSJeff Kirsher #include <linux/phy.h>
98ec21e2ecSJeff Kirsher #include <linux/phy_fixed.h>
99ec21e2ecSJeff Kirsher #include <linux/of.h>
100ec21e2ecSJeff Kirsher #include <linux/of_net.h>
101ec21e2ecSJeff Kirsher 
102ec21e2ecSJeff Kirsher #include "gianfar.h"
103ec21e2ecSJeff Kirsher 
1048fcc6033SAbhimanyu #define TX_TIMEOUT      (5*HZ)
105ec21e2ecSJeff Kirsher 
106ec21e2ecSJeff Kirsher MODULE_AUTHOR("Freescale Semiconductor, Inc");
107ec21e2ecSJeff Kirsher MODULE_DESCRIPTION("Gianfar Ethernet Driver");
108ec21e2ecSJeff Kirsher MODULE_LICENSE("GPL");
109ec21e2ecSJeff Kirsher 
110ec21e2ecSJeff Kirsher static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
111ec21e2ecSJeff Kirsher 			    dma_addr_t buf)
112ec21e2ecSJeff Kirsher {
113ec21e2ecSJeff Kirsher 	u32 lstatus;
114ec21e2ecSJeff Kirsher 
115a7312d58SClaudiu Manoil 	bdp->bufPtr = cpu_to_be32(buf);
116ec21e2ecSJeff Kirsher 
117ec21e2ecSJeff Kirsher 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
118ec21e2ecSJeff Kirsher 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
119ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(RXBD_WRAP);
120ec21e2ecSJeff Kirsher 
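	/* Make sure the buffer pointer write above is visible before the
	 * descriptor is marked EMPTY (i.e. handed back to the controller)
	 * below.
	 */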
121d55398baSClaudiu Manoil 	gfar_wmb();
122ec21e2ecSJeff Kirsher 
123a7312d58SClaudiu Manoil 	bdp->lstatus = cpu_to_be32(lstatus);
124ec21e2ecSJeff Kirsher }
125ec21e2ecSJeff Kirsher 
126ec21e2ecSJeff Kirsher static void gfar_init_tx_rx_base(struct gfar_private *priv)
127ec21e2ecSJeff Kirsher {
128ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
129ec21e2ecSJeff Kirsher 	u32 __iomem *baddr;
130ec21e2ecSJeff Kirsher 	int i;
131ec21e2ecSJeff Kirsher 
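	/* Successive per-queue base registers (tbase0.., rbase0..) are two
	 * 32-bit words apart in the register block, hence the baddr += 2
	 * steps below.
	 */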
132ec21e2ecSJeff Kirsher 	baddr = &regs->tbase0;
133ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
134ec21e2ecSJeff Kirsher 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
135ec21e2ecSJeff Kirsher 		baddr += 2;
136ec21e2ecSJeff Kirsher 	}
137ec21e2ecSJeff Kirsher 
138ec21e2ecSJeff Kirsher 	baddr = &regs->rbase0;
139ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
140ec21e2ecSJeff Kirsher 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
141ec21e2ecSJeff Kirsher 		baddr += 2;
142ec21e2ecSJeff Kirsher 	}
143ec21e2ecSJeff Kirsher }
144ec21e2ecSJeff Kirsher 
14545b679c9SMatei Pavaluca static void gfar_init_rqprm(struct gfar_private *priv)
14645b679c9SMatei Pavaluca {
14745b679c9SMatei Pavaluca 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
14845b679c9SMatei Pavaluca 	u32 __iomem *baddr;
14945b679c9SMatei Pavaluca 	int i;
15045b679c9SMatei Pavaluca 
15145b679c9SMatei Pavaluca 	baddr = &regs->rqprm0;
15245b679c9SMatei Pavaluca 	for (i = 0; i < priv->num_rx_queues; i++) {
15345b679c9SMatei Pavaluca 		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
15445b679c9SMatei Pavaluca 			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
15545b679c9SMatei Pavaluca 		baddr++;
15645b679c9SMatei Pavaluca 	}
15745b679c9SMatei Pavaluca }
15845b679c9SMatei Pavaluca 
15975354148SClaudiu Manoil static void gfar_rx_offload_en(struct gfar_private *priv)
16088302648SClaudiu Manoil {
16188302648SClaudiu Manoil 	/* uses_rxfcb is set when any rx hw offload (TOE) function is in use */
16288302648SClaudiu Manoil 	priv->uses_rxfcb = 0;
16388302648SClaudiu Manoil 
16488302648SClaudiu Manoil 	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
16588302648SClaudiu Manoil 		priv->uses_rxfcb = 1;
16688302648SClaudiu Manoil 
16715bf176dSClaudiu Manoil 	if (priv->hwts_rx_en || priv->rx_filer_enable)
16888302648SClaudiu Manoil 		priv->uses_rxfcb = 1;
16988302648SClaudiu Manoil }
17088302648SClaudiu Manoil 
171a328ac92SClaudiu Manoil static void gfar_mac_rx_config(struct gfar_private *priv)
172ec21e2ecSJeff Kirsher {
173ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
174ec21e2ecSJeff Kirsher 	u32 rctrl = 0;
175ec21e2ecSJeff Kirsher 
176ec21e2ecSJeff Kirsher 	if (priv->rx_filer_enable) {
17715bf176dSClaudiu Manoil 		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
178ec21e2ecSJeff Kirsher 		/* Program the RIR0 reg with the required distribution */
17971ff9e3dSClaudiu Manoil 		if (priv->poll_mode == GFAR_SQ_POLLING)
18071ff9e3dSClaudiu Manoil 			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
18171ff9e3dSClaudiu Manoil 		else /* GFAR_MQ_POLLING */
18271ff9e3dSClaudiu Manoil 			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
183ec21e2ecSJeff Kirsher 	}
184ec21e2ecSJeff Kirsher 
185f5ae6279SClaudiu Manoil 	/* Restore PROMISC mode */
186a328ac92SClaudiu Manoil 	if (priv->ndev->flags & IFF_PROMISC)
187f5ae6279SClaudiu Manoil 		rctrl |= RCTRL_PROM;
188f5ae6279SClaudiu Manoil 
18988302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_RXCSUM)
190ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_CHECKSUMMING;
191ec21e2ecSJeff Kirsher 
19288302648SClaudiu Manoil 	if (priv->extended_hash)
19388302648SClaudiu Manoil 		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
194ec21e2ecSJeff Kirsher 
195ec21e2ecSJeff Kirsher 	if (priv->padding) {
196ec21e2ecSJeff Kirsher 		rctrl &= ~RCTRL_PAL_MASK;
197ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_PADDING(priv->padding);
198ec21e2ecSJeff Kirsher 	}
199ec21e2ecSJeff Kirsher 
200ec21e2ecSJeff Kirsher 	/* Enable HW time stamping if requested from user space */
20188302648SClaudiu Manoil 	if (priv->hwts_rx_en)
202ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
203ec21e2ecSJeff Kirsher 
20488302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
205ec21e2ecSJeff Kirsher 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
206ec21e2ecSJeff Kirsher 
20745b679c9SMatei Pavaluca 	/* Clear the LFC bit */
20845b679c9SMatei Pavaluca 	gfar_write(&regs->rctrl, rctrl);
20945b679c9SMatei Pavaluca 	/* Init flow control threshold values */
21045b679c9SMatei Pavaluca 	gfar_init_rqprm(priv);
21145b679c9SMatei Pavaluca 	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
21245b679c9SMatei Pavaluca 	rctrl |= RCTRL_LFC;
21345b679c9SMatei Pavaluca 
214ec21e2ecSJeff Kirsher 	/* Init rctrl based on our settings */
215ec21e2ecSJeff Kirsher 	gfar_write(&regs->rctrl, rctrl);
216a328ac92SClaudiu Manoil }
217ec21e2ecSJeff Kirsher 
218a328ac92SClaudiu Manoil static void gfar_mac_tx_config(struct gfar_private *priv)
219a328ac92SClaudiu Manoil {
220a328ac92SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
221a328ac92SClaudiu Manoil 	u32 tctrl = 0;
222a328ac92SClaudiu Manoil 
223a328ac92SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_IP_CSUM)
224ec21e2ecSJeff Kirsher 		tctrl |= TCTRL_INIT_CSUM;
225ec21e2ecSJeff Kirsher 
226b98b8babSClaudiu Manoil 	if (priv->prio_sched_en)
227ec21e2ecSJeff Kirsher 		tctrl |= TCTRL_TXSCHED_PRIO;
228b98b8babSClaudiu Manoil 	else {
229b98b8babSClaudiu Manoil 		tctrl |= TCTRL_TXSCHED_WRRS;
230b98b8babSClaudiu Manoil 		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
231b98b8babSClaudiu Manoil 		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
232b98b8babSClaudiu Manoil 	}
233ec21e2ecSJeff Kirsher 
23488302648SClaudiu Manoil 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
23588302648SClaudiu Manoil 		tctrl |= TCTRL_VLINS;
23688302648SClaudiu Manoil 
237ec21e2ecSJeff Kirsher 	gfar_write(&regs->tctrl, tctrl);
238ec21e2ecSJeff Kirsher }
239ec21e2ecSJeff Kirsher 
240f19015baSClaudiu Manoil static void gfar_configure_coalescing(struct gfar_private *priv,
241f19015baSClaudiu Manoil 			       unsigned long tx_mask, unsigned long rx_mask)
242f19015baSClaudiu Manoil {
243f19015baSClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
244f19015baSClaudiu Manoil 	u32 __iomem *baddr;
245f19015baSClaudiu Manoil 
246f19015baSClaudiu Manoil 	if (priv->mode == MQ_MG_MODE) {
247f19015baSClaudiu Manoil 		int i = 0;
248f19015baSClaudiu Manoil 
249f19015baSClaudiu Manoil 		baddr = &regs->txic0;
250f19015baSClaudiu Manoil 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
251f19015baSClaudiu Manoil 			gfar_write(baddr + i, 0);
252f19015baSClaudiu Manoil 			if (likely(priv->tx_queue[i]->txcoalescing))
253f19015baSClaudiu Manoil 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
254f19015baSClaudiu Manoil 		}
255f19015baSClaudiu Manoil 
256f19015baSClaudiu Manoil 		baddr = &regs->rxic0;
257f19015baSClaudiu Manoil 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
258f19015baSClaudiu Manoil 			gfar_write(baddr + i, 0);
259f19015baSClaudiu Manoil 			if (likely(priv->rx_queue[i]->rxcoalescing))
260f19015baSClaudiu Manoil 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
261f19015baSClaudiu Manoil 		}
262f19015baSClaudiu Manoil 	} else {
263f19015baSClaudiu Manoil 		/* Backward compatible case -- even if we enable
264f19015baSClaudiu Manoil 		 * multiple queues, there's only a single reg to program
265f19015baSClaudiu Manoil 		 */
266f19015baSClaudiu Manoil 		gfar_write(&regs->txic, 0);
267f19015baSClaudiu Manoil 		if (likely(priv->tx_queue[0]->txcoalescing))
268f19015baSClaudiu Manoil 			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
269f19015baSClaudiu Manoil 
270f19015baSClaudiu Manoil 		gfar_write(&regs->rxic, 0);
271f19015baSClaudiu Manoil 		if (unlikely(priv->rx_queue[0]->rxcoalescing))
272f19015baSClaudiu Manoil 			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
273f19015baSClaudiu Manoil 	}
274f19015baSClaudiu Manoil }
275f19015baSClaudiu Manoil 
2767ad38784SArseny Solokha static void gfar_configure_coalescing_all(struct gfar_private *priv)
277f19015baSClaudiu Manoil {
278f19015baSClaudiu Manoil 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
279f19015baSClaudiu Manoil }
280f19015baSClaudiu Manoil 
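/* Fold the per-queue software counters into the single net_device stats
 * structure whenever the stack asks for interface statistics.
 */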
281ec21e2ecSJeff Kirsher static struct net_device_stats *gfar_get_stats(struct net_device *dev)
282ec21e2ecSJeff Kirsher {
283ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
284ec21e2ecSJeff Kirsher 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
285ec21e2ecSJeff Kirsher 	unsigned long tx_packets = 0, tx_bytes = 0;
2863a2e16c8SJan Ceuleers 	int i;
287ec21e2ecSJeff Kirsher 
288ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
289ec21e2ecSJeff Kirsher 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
290ec21e2ecSJeff Kirsher 		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
291ec21e2ecSJeff Kirsher 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
292ec21e2ecSJeff Kirsher 	}
293ec21e2ecSJeff Kirsher 
294ec21e2ecSJeff Kirsher 	dev->stats.rx_packets = rx_packets;
295ec21e2ecSJeff Kirsher 	dev->stats.rx_bytes   = rx_bytes;
296ec21e2ecSJeff Kirsher 	dev->stats.rx_dropped = rx_dropped;
297ec21e2ecSJeff Kirsher 
298ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
299ec21e2ecSJeff Kirsher 		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
300ec21e2ecSJeff Kirsher 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
301ec21e2ecSJeff Kirsher 	}
302ec21e2ecSJeff Kirsher 
303ec21e2ecSJeff Kirsher 	dev->stats.tx_bytes   = tx_bytes;
304ec21e2ecSJeff Kirsher 	dev->stats.tx_packets = tx_packets;
305ec21e2ecSJeff Kirsher 
306ec21e2ecSJeff Kirsher 	return &dev->stats;
307ec21e2ecSJeff Kirsher }
308ec21e2ecSJeff Kirsher 
3097d993c5fSArseny Solokha /* Set the appropriate hash bit for the given addr */
3107d993c5fSArseny Solokha /* The algorithm works like so:
3117d993c5fSArseny Solokha  * 1) Take the Destination Address (ie the multicast address), and
3127d993c5fSArseny Solokha  * do a CRC on it (little endian), and reverse the bits of the
3137d993c5fSArseny Solokha  * result.
3147d993c5fSArseny Solokha  * 2) Use the 8 most significant bits as a hash into a 256-entry
3157d993c5fSArseny Solokha  * table.  The table is controlled through 8 32-bit registers:
3167d993c5fSArseny Solokha  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3177d993c5fSArseny Solokha  * gaddr7.  This means that the 3 most significant bits in the
3187d993c5fSArseny Solokha  * entry 255.  This means that the 3 most significant bits in the
3197d993c5fSArseny Solokha  * hash index indicate which gaddr register to use, and the 5 other bits
3207d993c5fSArseny Solokha  * for PowerPC (tm) is usually the case) in the register holds
3217d993c5fSArseny Solokha  * the entry.
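 *
 * Illustrative example (basic hash, width 8, arbitrary CRC value): a CRC
 * whose top byte is 0xE3 (binary 111 00011) yields whichreg = 7 (top three
 * bits) and whichbit = 3 (next five bits), so gaddr7 is OR'ed with
 * 1 << (31 - 3) = 0x10000000.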
3227d993c5fSArseny Solokha  */
3237d993c5fSArseny Solokha static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3247d993c5fSArseny Solokha {
3257d993c5fSArseny Solokha 	u32 tempval;
3267d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
3277d993c5fSArseny Solokha 	u32 result = ether_crc(ETH_ALEN, addr);
3287d993c5fSArseny Solokha 	int width = priv->hash_width;
3297d993c5fSArseny Solokha 	u8 whichbit = (result >> (32 - width)) & 0x1f;
3307d993c5fSArseny Solokha 	u8 whichreg = result >> (32 - width + 5);
3317d993c5fSArseny Solokha 	u32 value = (1 << (31-whichbit));
3327d993c5fSArseny Solokha 
3337d993c5fSArseny Solokha 	tempval = gfar_read(priv->hash_regs[whichreg]);
3347d993c5fSArseny Solokha 	tempval |= value;
3357d993c5fSArseny Solokha 	gfar_write(priv->hash_regs[whichreg], tempval);
3367d993c5fSArseny Solokha }
3377d993c5fSArseny Solokha 
3387d993c5fSArseny Solokha /* There are multiple MAC Address register pairs on some controllers
3397d993c5fSArseny Solokha  * This function sets the numth pair to a given address
3407d993c5fSArseny Solokha  */
3417d993c5fSArseny Solokha static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3427d993c5fSArseny Solokha 				  const u8 *addr)
3437d993c5fSArseny Solokha {
3447d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
3457d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3467d993c5fSArseny Solokha 	u32 tempval;
3477d993c5fSArseny Solokha 	u32 __iomem *macptr = &regs->macstnaddr1;
3487d993c5fSArseny Solokha 
3497d993c5fSArseny Solokha 	macptr += num*2;
3507d993c5fSArseny Solokha 
3517d993c5fSArseny Solokha 	/* For a station address of 0x12345678ABCD in transmission
3527d993c5fSArseny Solokha 	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3537d993c5fSArseny Solokha 	 * MACnADDR2 is set to 0x34120000.
3547d993c5fSArseny Solokha 	 */
3557d993c5fSArseny Solokha 	tempval = (addr[5] << 24) | (addr[4] << 16) |
3567d993c5fSArseny Solokha 		  (addr[3] << 8)  |  addr[2];
3577d993c5fSArseny Solokha 
3587d993c5fSArseny Solokha 	gfar_write(macptr, tempval);
3597d993c5fSArseny Solokha 
3607d993c5fSArseny Solokha 	tempval = (addr[1] << 24) | (addr[0] << 16);
3617d993c5fSArseny Solokha 
3627d993c5fSArseny Solokha 	gfar_write(macptr+1, tempval);
3637d993c5fSArseny Solokha }
3647d993c5fSArseny Solokha 
3653d23a05cSClaudiu Manoil static int gfar_set_mac_addr(struct net_device *dev, void *p)
3663d23a05cSClaudiu Manoil {
3673d23a05cSClaudiu Manoil 	eth_mac_addr(dev, p);
3683d23a05cSClaudiu Manoil 
3693d23a05cSClaudiu Manoil 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
3703d23a05cSClaudiu Manoil 
3713d23a05cSClaudiu Manoil 	return 0;
3723d23a05cSClaudiu Manoil }
3733d23a05cSClaudiu Manoil 
374efeddce7SClaudiu Manoil static void gfar_ints_disable(struct gfar_private *priv)
375efeddce7SClaudiu Manoil {
376efeddce7SClaudiu Manoil 	int i;
377efeddce7SClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
378efeddce7SClaudiu Manoil 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
379efeddce7SClaudiu Manoil 		/* Clear IEVENT */
380efeddce7SClaudiu Manoil 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
381efeddce7SClaudiu Manoil 
382efeddce7SClaudiu Manoil 		/* Initialize IMASK */
383efeddce7SClaudiu Manoil 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
384efeddce7SClaudiu Manoil 	}
385efeddce7SClaudiu Manoil }
386efeddce7SClaudiu Manoil 
387efeddce7SClaudiu Manoil static void gfar_ints_enable(struct gfar_private *priv)
388efeddce7SClaudiu Manoil {
389efeddce7SClaudiu Manoil 	int i;
390efeddce7SClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
391efeddce7SClaudiu Manoil 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
392efeddce7SClaudiu Manoil 		/* Unmask the interrupts we look for */
393efeddce7SClaudiu Manoil 		gfar_write(&regs->imask, IMASK_DEFAULT);
394efeddce7SClaudiu Manoil 	}
395efeddce7SClaudiu Manoil }
396efeddce7SClaudiu Manoil 
39720862788SClaudiu Manoil static int gfar_alloc_tx_queues(struct gfar_private *priv)
39820862788SClaudiu Manoil {
39920862788SClaudiu Manoil 	int i;
40020862788SClaudiu Manoil 
40120862788SClaudiu Manoil 	for (i = 0; i < priv->num_tx_queues; i++) {
40220862788SClaudiu Manoil 		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
40320862788SClaudiu Manoil 					    GFP_KERNEL);
40420862788SClaudiu Manoil 		if (!priv->tx_queue[i])
40520862788SClaudiu Manoil 			return -ENOMEM;
40620862788SClaudiu Manoil 
40720862788SClaudiu Manoil 		priv->tx_queue[i]->tx_skbuff = NULL;
40820862788SClaudiu Manoil 		priv->tx_queue[i]->qindex = i;
40920862788SClaudiu Manoil 		priv->tx_queue[i]->dev = priv->ndev;
41020862788SClaudiu Manoil 		spin_lock_init(&(priv->tx_queue[i]->txlock));
41120862788SClaudiu Manoil 	}
41220862788SClaudiu Manoil 	return 0;
41320862788SClaudiu Manoil }
41420862788SClaudiu Manoil 
41520862788SClaudiu Manoil static int gfar_alloc_rx_queues(struct gfar_private *priv)
41620862788SClaudiu Manoil {
41720862788SClaudiu Manoil 	int i;
41820862788SClaudiu Manoil 
41920862788SClaudiu Manoil 	for (i = 0; i < priv->num_rx_queues; i++) {
42020862788SClaudiu Manoil 		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
42120862788SClaudiu Manoil 					    GFP_KERNEL);
42220862788SClaudiu Manoil 		if (!priv->rx_queue[i])
42320862788SClaudiu Manoil 			return -ENOMEM;
42420862788SClaudiu Manoil 
42520862788SClaudiu Manoil 		priv->rx_queue[i]->qindex = i;
426f23223f1SClaudiu Manoil 		priv->rx_queue[i]->ndev = priv->ndev;
42720862788SClaudiu Manoil 	}
42820862788SClaudiu Manoil 	return 0;
42920862788SClaudiu Manoil }
43020862788SClaudiu Manoil 
43120862788SClaudiu Manoil static void gfar_free_tx_queues(struct gfar_private *priv)
432ec21e2ecSJeff Kirsher {
4333a2e16c8SJan Ceuleers 	int i;
434ec21e2ecSJeff Kirsher 
435ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++)
436ec21e2ecSJeff Kirsher 		kfree(priv->tx_queue[i]);
437ec21e2ecSJeff Kirsher }
438ec21e2ecSJeff Kirsher 
43920862788SClaudiu Manoil static void gfar_free_rx_queues(struct gfar_private *priv)
440ec21e2ecSJeff Kirsher {
4413a2e16c8SJan Ceuleers 	int i;
442ec21e2ecSJeff Kirsher 
443ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++)
444ec21e2ecSJeff Kirsher 		kfree(priv->rx_queue[i]);
445ec21e2ecSJeff Kirsher }
446ec21e2ecSJeff Kirsher 
447ec21e2ecSJeff Kirsher static void unmap_group_regs(struct gfar_private *priv)
448ec21e2ecSJeff Kirsher {
4493a2e16c8SJan Ceuleers 	int i;
450ec21e2ecSJeff Kirsher 
451ec21e2ecSJeff Kirsher 	for (i = 0; i < MAXGROUPS; i++)
452ec21e2ecSJeff Kirsher 		if (priv->gfargrp[i].regs)
453ec21e2ecSJeff Kirsher 			iounmap(priv->gfargrp[i].regs);
454ec21e2ecSJeff Kirsher }
455ec21e2ecSJeff Kirsher 
456ee873fdaSClaudiu Manoil static void free_gfar_dev(struct gfar_private *priv)
457ee873fdaSClaudiu Manoil {
458ee873fdaSClaudiu Manoil 	int i, j;
459ee873fdaSClaudiu Manoil 
460ee873fdaSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++)
461ee873fdaSClaudiu Manoil 		for (j = 0; j < GFAR_NUM_IRQS; j++) {
462ee873fdaSClaudiu Manoil 			kfree(priv->gfargrp[i].irqinfo[j]);
463ee873fdaSClaudiu Manoil 			priv->gfargrp[i].irqinfo[j] = NULL;
464ee873fdaSClaudiu Manoil 		}
465ee873fdaSClaudiu Manoil 
466ee873fdaSClaudiu Manoil 	free_netdev(priv->ndev);
467ee873fdaSClaudiu Manoil }
468ee873fdaSClaudiu Manoil 
469ec21e2ecSJeff Kirsher static void disable_napi(struct gfar_private *priv)
470ec21e2ecSJeff Kirsher {
4713a2e16c8SJan Ceuleers 	int i;
472ec21e2ecSJeff Kirsher 
473aeb12c5eSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
474aeb12c5eSClaudiu Manoil 		napi_disable(&priv->gfargrp[i].napi_rx);
475aeb12c5eSClaudiu Manoil 		napi_disable(&priv->gfargrp[i].napi_tx);
476aeb12c5eSClaudiu Manoil 	}
477ec21e2ecSJeff Kirsher }
478ec21e2ecSJeff Kirsher 
479ec21e2ecSJeff Kirsher static void enable_napi(struct gfar_private *priv)
480ec21e2ecSJeff Kirsher {
4813a2e16c8SJan Ceuleers 	int i;
482ec21e2ecSJeff Kirsher 
483aeb12c5eSClaudiu Manoil 	for (i = 0; i < priv->num_grps; i++) {
484aeb12c5eSClaudiu Manoil 		napi_enable(&priv->gfargrp[i].napi_rx);
485aeb12c5eSClaudiu Manoil 		napi_enable(&priv->gfargrp[i].napi_tx);
486aeb12c5eSClaudiu Manoil 	}
487ec21e2ecSJeff Kirsher }
488ec21e2ecSJeff Kirsher 
489ec21e2ecSJeff Kirsher static int gfar_parse_group(struct device_node *np,
490ec21e2ecSJeff Kirsher 			    struct gfar_private *priv, const char *model)
491ec21e2ecSJeff Kirsher {
4925fedcc14SClaudiu Manoil 	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
493ee873fdaSClaudiu Manoil 	int i;
494ee873fdaSClaudiu Manoil 
495ee873fdaSClaudiu Manoil 	for (i = 0; i < GFAR_NUM_IRQS; i++) {
496ee873fdaSClaudiu Manoil 		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
497ee873fdaSClaudiu Manoil 					  GFP_KERNEL);
498ee873fdaSClaudiu Manoil 		if (!grp->irqinfo[i])
499ee873fdaSClaudiu Manoil 			return -ENOMEM;
500ee873fdaSClaudiu Manoil 	}
501ec21e2ecSJeff Kirsher 
5025fedcc14SClaudiu Manoil 	grp->regs = of_iomap(np, 0);
5035fedcc14SClaudiu Manoil 	if (!grp->regs)
504ec21e2ecSJeff Kirsher 		return -ENOMEM;
505ec21e2ecSJeff Kirsher 
506ee873fdaSClaudiu Manoil 	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
507ec21e2ecSJeff Kirsher 
508ec21e2ecSJeff Kirsher 	/* If we aren't the FEC we have multiple interrupts */
509ec21e2ecSJeff Kirsher 	if (model && strcasecmp(model, "FEC")) {
510ee873fdaSClaudiu Manoil 		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
511ee873fdaSClaudiu Manoil 		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
512fea0f665SMark Brown 		if (!gfar_irq(grp, TX)->irq ||
513fea0f665SMark Brown 		    !gfar_irq(grp, RX)->irq ||
514fea0f665SMark Brown 		    !gfar_irq(grp, ER)->irq)
515ec21e2ecSJeff Kirsher 			return -EINVAL;
516ec21e2ecSJeff Kirsher 	}
517ec21e2ecSJeff Kirsher 
5185fedcc14SClaudiu Manoil 	grp->priv = priv;
5195fedcc14SClaudiu Manoil 	spin_lock_init(&grp->grplock);
520ec21e2ecSJeff Kirsher 	if (priv->mode == MQ_MG_MODE) {
52155917641SJingchang Lu 		u32 rxq_mask, txq_mask;
52255917641SJingchang Lu 		int ret;
52355917641SJingchang Lu 
52455917641SJingchang Lu 		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
52555917641SJingchang Lu 		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
52655917641SJingchang Lu 
52755917641SJingchang Lu 		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
52855917641SJingchang Lu 		if (!ret) {
52955917641SJingchang Lu 			grp->rx_bit_map = rxq_mask ?
53055917641SJingchang Lu 			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
53155917641SJingchang Lu 		}
53255917641SJingchang Lu 
53355917641SJingchang Lu 		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
53455917641SJingchang Lu 		if (!ret) {
53555917641SJingchang Lu 			grp->tx_bit_map = txq_mask ?
53655917641SJingchang Lu 			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
53755917641SJingchang Lu 		}
53871ff9e3dSClaudiu Manoil 
53971ff9e3dSClaudiu Manoil 		if (priv->poll_mode == GFAR_SQ_POLLING) {
54071ff9e3dSClaudiu Manoil 			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
54171ff9e3dSClaudiu Manoil 			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
54271ff9e3dSClaudiu Manoil 			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
54371ff9e3dSClaudiu Manoil 		}
544ec21e2ecSJeff Kirsher 	} else {
5455fedcc14SClaudiu Manoil 		grp->rx_bit_map = 0xFF;
5465fedcc14SClaudiu Manoil 		grp->tx_bit_map = 0xFF;
547ec21e2ecSJeff Kirsher 	}
54820862788SClaudiu Manoil 
54920862788SClaudiu Manoil 	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
55020862788SClaudiu Manoil 	 * bits right to left, so we need to reverse the 8 bits to get the q index
55120862788SClaudiu Manoil 	 */
55220862788SClaudiu Manoil 	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
55320862788SClaudiu Manoil 	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
55420862788SClaudiu Manoil 
55520862788SClaudiu Manoil 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
55620862788SClaudiu Manoil 	 * also assign queues to groups
55720862788SClaudiu Manoil 	 */
55820862788SClaudiu Manoil 	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
55971ff9e3dSClaudiu Manoil 		if (!grp->rx_queue)
56071ff9e3dSClaudiu Manoil 			grp->rx_queue = priv->rx_queue[i];
56120862788SClaudiu Manoil 		grp->num_rx_queues++;
56220862788SClaudiu Manoil 		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
56320862788SClaudiu Manoil 		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
56420862788SClaudiu Manoil 		priv->rx_queue[i]->grp = grp;
56520862788SClaudiu Manoil 	}
56620862788SClaudiu Manoil 
56720862788SClaudiu Manoil 	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
56871ff9e3dSClaudiu Manoil 		if (!grp->tx_queue)
56971ff9e3dSClaudiu Manoil 			grp->tx_queue = priv->tx_queue[i];
57020862788SClaudiu Manoil 		grp->num_tx_queues++;
57120862788SClaudiu Manoil 		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
57220862788SClaudiu Manoil 		priv->tqueue |= (TQUEUE_EN0 >> i);
57320862788SClaudiu Manoil 		priv->tx_queue[i]->grp = grp;
57420862788SClaudiu Manoil 	}
57520862788SClaudiu Manoil 
576ec21e2ecSJeff Kirsher 	priv->num_grps++;
577ec21e2ecSJeff Kirsher 
578ec21e2ecSJeff Kirsher 	return 0;
579ec21e2ecSJeff Kirsher }
580ec21e2ecSJeff Kirsher 
581f50724cdSTobias Waldekranz static int gfar_of_group_count(struct device_node *np)
582f50724cdSTobias Waldekranz {
583f50724cdSTobias Waldekranz 	struct device_node *child;
584f50724cdSTobias Waldekranz 	int num = 0;
585f50724cdSTobias Waldekranz 
586f50724cdSTobias Waldekranz 	for_each_available_child_of_node(np, child)
587bf5849f1SRob Herring 		if (of_node_name_eq(child, "queue-group"))
588f50724cdSTobias Waldekranz 			num++;
589f50724cdSTobias Waldekranz 
590f50724cdSTobias Waldekranz 	return num;
591f50724cdSTobias Waldekranz }
592f50724cdSTobias Waldekranz 
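/* Illustrative, abridged device-tree fragment (assuming the "fsl,etsec2"
 * binding): each available child node named "queue-group" contributes one
 * interrupt group to the count above.
 *
 *	ethernet@0 {
 *		compatible = "fsl,etsec2";
 *		queue-group@0 { ... };
 *		queue-group@1 { ... };
 *	};
 */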
5937d993c5fSArseny Solokha /* Reads the controller's registers to determine what interface
5947d993c5fSArseny Solokha  * connects it to the PHY.
5957d993c5fSArseny Solokha  */
5967d993c5fSArseny Solokha static phy_interface_t gfar_get_interface(struct net_device *dev)
5977d993c5fSArseny Solokha {
5987d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
5997d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
6007d993c5fSArseny Solokha 	u32 ecntrl;
6017d993c5fSArseny Solokha 
6027d993c5fSArseny Solokha 	ecntrl = gfar_read(&regs->ecntrl);
6037d993c5fSArseny Solokha 
6047d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_SGMII_MODE)
6057d993c5fSArseny Solokha 		return PHY_INTERFACE_MODE_SGMII;
6067d993c5fSArseny Solokha 
6077d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_TBI_MODE) {
6087d993c5fSArseny Solokha 		if (ecntrl & ECNTRL_REDUCED_MODE)
6097d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RTBI;
6107d993c5fSArseny Solokha 		else
6117d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_TBI;
6127d993c5fSArseny Solokha 	}
6137d993c5fSArseny Solokha 
6147d993c5fSArseny Solokha 	if (ecntrl & ECNTRL_REDUCED_MODE) {
6157d993c5fSArseny Solokha 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
6167d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RMII;
6177d993c5fSArseny Solokha 		}
6187d993c5fSArseny Solokha 		else {
6197d993c5fSArseny Solokha 			phy_interface_t interface = priv->interface;
6207d993c5fSArseny Solokha 
6217d993c5fSArseny Solokha 			/* This isn't autodetected right now, so it must
6227d993c5fSArseny Solokha 			 * be set by the device tree or platform code.
6237d993c5fSArseny Solokha 			 */
6247d993c5fSArseny Solokha 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
6257d993c5fSArseny Solokha 				return PHY_INTERFACE_MODE_RGMII_ID;
6267d993c5fSArseny Solokha 
6277d993c5fSArseny Solokha 			return PHY_INTERFACE_MODE_RGMII;
6287d993c5fSArseny Solokha 		}
6297d993c5fSArseny Solokha 	}
6307d993c5fSArseny Solokha 
6317d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
6327d993c5fSArseny Solokha 		return PHY_INTERFACE_MODE_GMII;
6337d993c5fSArseny Solokha 
6347d993c5fSArseny Solokha 	return PHY_INTERFACE_MODE_MII;
6357d993c5fSArseny Solokha }
6367d993c5fSArseny Solokha 
637ec21e2ecSJeff Kirsher static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
638ec21e2ecSJeff Kirsher {
639ec21e2ecSJeff Kirsher 	const char *model;
640ec21e2ecSJeff Kirsher 	const void *mac_addr;
641ec21e2ecSJeff Kirsher 	int err = 0, i;
6420c65b2b9SAndrew Lunn 	phy_interface_t interface;
643ec21e2ecSJeff Kirsher 	struct net_device *dev = NULL;
644ec21e2ecSJeff Kirsher 	struct gfar_private *priv = NULL;
645ec21e2ecSJeff Kirsher 	struct device_node *np = ofdev->dev.of_node;
646ec21e2ecSJeff Kirsher 	struct device_node *child = NULL;
64755917641SJingchang Lu 	u32 stash_len = 0;
64855917641SJingchang Lu 	u32 stash_idx = 0;
649ec21e2ecSJeff Kirsher 	unsigned int num_tx_qs, num_rx_qs;
650b338ce27SClaudiu Manoil 	unsigned short mode, poll_mode;
651ec21e2ecSJeff Kirsher 
6524b222ca6SKevin Hao 	if (!np)
653ec21e2ecSJeff Kirsher 		return -ENODEV;
654ec21e2ecSJeff Kirsher 
655b338ce27SClaudiu Manoil 	if (of_device_is_compatible(np, "fsl,etsec2")) {
656b338ce27SClaudiu Manoil 		mode = MQ_MG_MODE;
657b338ce27SClaudiu Manoil 		poll_mode = GFAR_SQ_POLLING;
658b338ce27SClaudiu Manoil 	} else {
659b338ce27SClaudiu Manoil 		mode = SQ_SG_MODE;
660b338ce27SClaudiu Manoil 		poll_mode = GFAR_SQ_POLLING;
661b338ce27SClaudiu Manoil 	}
662b338ce27SClaudiu Manoil 
663b338ce27SClaudiu Manoil 	if (mode == SQ_SG_MODE) {
66471ff9e3dSClaudiu Manoil 		num_tx_qs = 1;
66571ff9e3dSClaudiu Manoil 		num_rx_qs = 1;
66671ff9e3dSClaudiu Manoil 	} else { /* MQ_MG_MODE */
667c65d7533SClaudiu Manoil 		/* get the actual number of supported groups */
668f50724cdSTobias Waldekranz 		unsigned int num_grps = gfar_of_group_count(np);
669c65d7533SClaudiu Manoil 
670c65d7533SClaudiu Manoil 		if (num_grps == 0 || num_grps > MAXGROUPS) {
671c65d7533SClaudiu Manoil 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
672c65d7533SClaudiu Manoil 				num_grps);
673c65d7533SClaudiu Manoil 			pr_err("Cannot do alloc_etherdev, aborting\n");
674c65d7533SClaudiu Manoil 			return -EINVAL;
675c65d7533SClaudiu Manoil 		}
676c65d7533SClaudiu Manoil 
677b338ce27SClaudiu Manoil 		if (poll_mode == GFAR_SQ_POLLING) {
678c65d7533SClaudiu Manoil 			num_tx_qs = num_grps; /* one txq per int group */
679c65d7533SClaudiu Manoil 			num_rx_qs = num_grps; /* one rxq per int group */
68071ff9e3dSClaudiu Manoil 		} else { /* GFAR_MQ_POLLING */
68155917641SJingchang Lu 			u32 tx_queues, rx_queues;
68255917641SJingchang Lu 			int ret;
68355917641SJingchang Lu 
68455917641SJingchang Lu 			/* parse the num of HW tx and rx queues */
68555917641SJingchang Lu 			ret = of_property_read_u32(np, "fsl,num_tx_queues",
68655917641SJingchang Lu 						   &tx_queues);
68755917641SJingchang Lu 			num_tx_qs = ret ? 1 : tx_queues;
68855917641SJingchang Lu 
68955917641SJingchang Lu 			ret = of_property_read_u32(np, "fsl,num_rx_queues",
69055917641SJingchang Lu 						   &rx_queues);
69155917641SJingchang Lu 			num_rx_qs = ret ? 1 : rx_queues;
69271ff9e3dSClaudiu Manoil 		}
69371ff9e3dSClaudiu Manoil 	}
694ec21e2ecSJeff Kirsher 
695ec21e2ecSJeff Kirsher 	if (num_tx_qs > MAX_TX_QS) {
696ec21e2ecSJeff Kirsher 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
697ec21e2ecSJeff Kirsher 		       num_tx_qs, MAX_TX_QS);
698ec21e2ecSJeff Kirsher 		pr_err("Cannot do alloc_etherdev, aborting\n");
699ec21e2ecSJeff Kirsher 		return -EINVAL;
700ec21e2ecSJeff Kirsher 	}
701ec21e2ecSJeff Kirsher 
702ec21e2ecSJeff Kirsher 	if (num_rx_qs > MAX_RX_QS) {
703ec21e2ecSJeff Kirsher 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
704ec21e2ecSJeff Kirsher 		       num_rx_qs, MAX_RX_QS);
705ec21e2ecSJeff Kirsher 		pr_err("Cannot do alloc_etherdev, aborting\n");
706ec21e2ecSJeff Kirsher 		return -EINVAL;
707ec21e2ecSJeff Kirsher 	}
708ec21e2ecSJeff Kirsher 
709ec21e2ecSJeff Kirsher 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
710ec21e2ecSJeff Kirsher 	dev = *pdev;
711ec21e2ecSJeff Kirsher 	if (NULL == dev)
712ec21e2ecSJeff Kirsher 		return -ENOMEM;
713ec21e2ecSJeff Kirsher 
714ec21e2ecSJeff Kirsher 	priv = netdev_priv(dev);
715ec21e2ecSJeff Kirsher 	priv->ndev = dev;
716ec21e2ecSJeff Kirsher 
717b338ce27SClaudiu Manoil 	priv->mode = mode;
718b338ce27SClaudiu Manoil 	priv->poll_mode = poll_mode;
719b338ce27SClaudiu Manoil 
720ec21e2ecSJeff Kirsher 	priv->num_tx_queues = num_tx_qs;
721ec21e2ecSJeff Kirsher 	netif_set_real_num_rx_queues(dev, num_rx_qs);
722ec21e2ecSJeff Kirsher 	priv->num_rx_queues = num_rx_qs;
72320862788SClaudiu Manoil 
72420862788SClaudiu Manoil 	err = gfar_alloc_tx_queues(priv);
72520862788SClaudiu Manoil 	if (err)
72620862788SClaudiu Manoil 		goto tx_alloc_failed;
72720862788SClaudiu Manoil 
72820862788SClaudiu Manoil 	err = gfar_alloc_rx_queues(priv);
72920862788SClaudiu Manoil 	if (err)
73020862788SClaudiu Manoil 		goto rx_alloc_failed;
731ec21e2ecSJeff Kirsher 
73255917641SJingchang Lu 	err = of_property_read_string(np, "model", &model);
73355917641SJingchang Lu 	if (err) {
73455917641SJingchang Lu 		pr_err("Device model property missing, aborting\n");
73555917641SJingchang Lu 		goto rx_alloc_failed;
73655917641SJingchang Lu 	}
73755917641SJingchang Lu 
738ec21e2ecSJeff Kirsher 	/* Init Rx queue filer rule set linked list */
739ec21e2ecSJeff Kirsher 	INIT_LIST_HEAD(&priv->rx_list.list);
740ec21e2ecSJeff Kirsher 	priv->rx_list.count = 0;
741ec21e2ecSJeff Kirsher 	mutex_init(&priv->rx_queue_access);
742ec21e2ecSJeff Kirsher 
743ec21e2ecSJeff Kirsher 	for (i = 0; i < MAXGROUPS; i++)
744ec21e2ecSJeff Kirsher 		priv->gfargrp[i].regs = NULL;
745ec21e2ecSJeff Kirsher 
746ec21e2ecSJeff Kirsher 	/* Parse and initialize group specific information */
747b338ce27SClaudiu Manoil 	if (priv->mode == MQ_MG_MODE) {
748f50724cdSTobias Waldekranz 		for_each_available_child_of_node(np, child) {
749bf5849f1SRob Herring 			if (!of_node_name_eq(child, "queue-group"))
750f50724cdSTobias Waldekranz 				continue;
751f50724cdSTobias Waldekranz 
752ec21e2ecSJeff Kirsher 			err = gfar_parse_group(child, priv, model);
753ec21e2ecSJeff Kirsher 			if (err)
754ec21e2ecSJeff Kirsher 				goto err_grp_init;
755ec21e2ecSJeff Kirsher 		}
756b338ce27SClaudiu Manoil 	} else { /* SQ_SG_MODE */
757ec21e2ecSJeff Kirsher 		err = gfar_parse_group(np, priv, model);
758ec21e2ecSJeff Kirsher 		if (err)
759ec21e2ecSJeff Kirsher 			goto err_grp_init;
760ec21e2ecSJeff Kirsher 	}
761ec21e2ecSJeff Kirsher 
7623f8c0f7eSSaurabh Sengar 	if (of_property_read_bool(np, "bd-stash")) {
763ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
764ec21e2ecSJeff Kirsher 		priv->bd_stash_en = 1;
765ec21e2ecSJeff Kirsher 	}
766ec21e2ecSJeff Kirsher 
76755917641SJingchang Lu 	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
768ec21e2ecSJeff Kirsher 
76955917641SJingchang Lu 	if (err == 0)
77055917641SJingchang Lu 		priv->rx_stash_size = stash_len;
771ec21e2ecSJeff Kirsher 
77255917641SJingchang Lu 	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
773ec21e2ecSJeff Kirsher 
77455917641SJingchang Lu 	if (err == 0)
77555917641SJingchang Lu 		priv->rx_stash_index = stash_idx;
776ec21e2ecSJeff Kirsher 
777ec21e2ecSJeff Kirsher 	if (stash_len || stash_idx)
778ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
779ec21e2ecSJeff Kirsher 
780ec21e2ecSJeff Kirsher 	mac_addr = of_get_mac_address(np);
781bc4598bcSJan Ceuleers 
782*ff021f22SMaxim Kochetkov 	if (!IS_ERR(mac_addr)) {
7832d2924afSPetr Štetiar 		ether_addr_copy(dev->dev_addr, mac_addr);
784*ff021f22SMaxim Kochetkov 	} else {
785*ff021f22SMaxim Kochetkov 		eth_hw_addr_random(dev);
786*ff021f22SMaxim Kochetkov 		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
787*ff021f22SMaxim Kochetkov 	}
788ec21e2ecSJeff Kirsher 
789ec21e2ecSJeff Kirsher 	if (model && !strcasecmp(model, "TSEC"))
79034018fd4SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
791ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_COALESCE |
792ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_RMON |
793ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
794bc4598bcSJan Ceuleers 
795ec21e2ecSJeff Kirsher 	if (model && !strcasecmp(model, "eTSEC"))
79634018fd4SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
797ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_COALESCE |
798ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_RMON |
799ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
800ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_CSUM |
801ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_VLAN |
802ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
803ec21e2ecSJeff Kirsher 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
8047bff47daSHamish Martin 				     FSL_GIANFAR_DEV_HAS_TIMER |
8057bff47daSHamish Martin 				     FSL_GIANFAR_DEV_HAS_RX_FILER;
806ec21e2ecSJeff Kirsher 
8078e578e73SArseny Solokha 	/* Use PHY connection type from the DT node if one is specified there.
8088e578e73SArseny Solokha 	 * rgmii-id really needs to be specified. Other types can be
8098e578e73SArseny Solokha 	 * detected by hardware
8108e578e73SArseny Solokha 	 */
8110c65b2b9SAndrew Lunn 	err = of_get_phy_mode(np, &interface);
8120c65b2b9SAndrew Lunn 	if (!err)
8130c65b2b9SAndrew Lunn 		priv->interface = interface;
814ec21e2ecSJeff Kirsher 	else
8158e578e73SArseny Solokha 		priv->interface = gfar_get_interface(dev);
816ec21e2ecSJeff Kirsher 
81755917641SJingchang Lu 	if (of_find_property(np, "fsl,magic-packet", NULL))
818ec21e2ecSJeff Kirsher 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
819ec21e2ecSJeff Kirsher 
8203e905b80SClaudiu Manoil 	if (of_get_property(np, "fsl,wake-on-filer", NULL))
8213e905b80SClaudiu Manoil 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
8223e905b80SClaudiu Manoil 
823ec21e2ecSJeff Kirsher 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
824ec21e2ecSJeff Kirsher 
825be403645SFlorian Fainelli 	/* In the case of a fixed PHY, the DT node associated
826be403645SFlorian Fainelli 	 * to the PHY is the Ethernet MAC DT node.
826be403645SFlorian Fainelli 	 * with the PHY is the Ethernet MAC DT node.
8286f2c9bd8SUwe Kleine-König 	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
829be403645SFlorian Fainelli 		err = of_phy_register_fixed_link(np);
830be403645SFlorian Fainelli 		if (err)
831be403645SFlorian Fainelli 			goto err_grp_init;
832be403645SFlorian Fainelli 
8336f2c9bd8SUwe Kleine-König 		priv->phy_node = of_node_get(np);
834be403645SFlorian Fainelli 	}
835be403645SFlorian Fainelli 
836ec21e2ecSJeff Kirsher 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
837ec21e2ecSJeff Kirsher 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
838ec21e2ecSJeff Kirsher 
839ec21e2ecSJeff Kirsher 	return 0;
840ec21e2ecSJeff Kirsher 
841ec21e2ecSJeff Kirsher err_grp_init:
842ec21e2ecSJeff Kirsher 	unmap_group_regs(priv);
84320862788SClaudiu Manoil rx_alloc_failed:
84420862788SClaudiu Manoil 	gfar_free_rx_queues(priv);
84520862788SClaudiu Manoil tx_alloc_failed:
84620862788SClaudiu Manoil 	gfar_free_tx_queues(priv);
847ee873fdaSClaudiu Manoil 	free_gfar_dev(priv);
848ec21e2ecSJeff Kirsher 	return err;
849ec21e2ecSJeff Kirsher }
850ec21e2ecSJeff Kirsher 
851ec21e2ecSJeff Kirsher static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
852ec21e2ecSJeff Kirsher 				   u32 class)
853ec21e2ecSJeff Kirsher {
854ec21e2ecSJeff Kirsher 	u32 rqfpr = FPR_FILER_MASK;
855ec21e2ecSJeff Kirsher 	u32 rqfcr = 0x0;
856ec21e2ecSJeff Kirsher 
857ec21e2ecSJeff Kirsher 	rqfar--;
858ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
859ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
860ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
861ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
862ec21e2ecSJeff Kirsher 
863ec21e2ecSJeff Kirsher 	rqfar--;
864ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_NOMATCH;
865ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
866ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
867ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
868ec21e2ecSJeff Kirsher 
869ec21e2ecSJeff Kirsher 	rqfar--;
870ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
871ec21e2ecSJeff Kirsher 	rqfpr = class;
872ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
873ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
874ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
875ec21e2ecSJeff Kirsher 
876ec21e2ecSJeff Kirsher 	rqfar--;
877ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
878ec21e2ecSJeff Kirsher 	rqfpr = class;
879ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
880ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
881ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
882ec21e2ecSJeff Kirsher 
883ec21e2ecSJeff Kirsher 	return rqfar;
884ec21e2ecSJeff Kirsher }
885ec21e2ecSJeff Kirsher 
886ec21e2ecSJeff Kirsher static void gfar_init_filer_table(struct gfar_private *priv)
887ec21e2ecSJeff Kirsher {
888ec21e2ecSJeff Kirsher 	int i = 0x0;
889ec21e2ecSJeff Kirsher 	u32 rqfar = MAX_FILER_IDX;
890ec21e2ecSJeff Kirsher 	u32 rqfcr = 0x0;
891ec21e2ecSJeff Kirsher 	u32 rqfpr = FPR_FILER_MASK;
892ec21e2ecSJeff Kirsher 
893ec21e2ecSJeff Kirsher 	/* Default rule */
894ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_MATCH;
895ec21e2ecSJeff Kirsher 	priv->ftp_rqfcr[rqfar] = rqfcr;
896ec21e2ecSJeff Kirsher 	priv->ftp_rqfpr[rqfar] = rqfpr;
897ec21e2ecSJeff Kirsher 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
898ec21e2ecSJeff Kirsher 
899ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
900ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
901ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
902ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
903ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
904ec21e2ecSJeff Kirsher 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
905ec21e2ecSJeff Kirsher 
906ec21e2ecSJeff Kirsher 	/* cur_filer_idx indicates the first non-masked rule */
907ec21e2ecSJeff Kirsher 	priv->cur_filer_idx = rqfar;
908ec21e2ecSJeff Kirsher 
909ec21e2ecSJeff Kirsher 	/* Rest are masked rules */
910ec21e2ecSJeff Kirsher 	rqfcr = RQFCR_CMP_NOMATCH;
911ec21e2ecSJeff Kirsher 	for (i = 0; i < rqfar; i++) {
912ec21e2ecSJeff Kirsher 		priv->ftp_rqfcr[i] = rqfcr;
913ec21e2ecSJeff Kirsher 		priv->ftp_rqfpr[i] = rqfpr;
914ec21e2ecSJeff Kirsher 		gfar_write_filer(priv, i, rqfcr, rqfpr);
915ec21e2ecSJeff Kirsher 	}
916ec21e2ecSJeff Kirsher }
917ec21e2ecSJeff Kirsher 
918d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
9192969b1f7SClaudiu Manoil static void __gfar_detect_errata_83xx(struct gfar_private *priv)
920ec21e2ecSJeff Kirsher {
921ec21e2ecSJeff Kirsher 	unsigned int pvr = mfspr(SPRN_PVR);
922ec21e2ecSJeff Kirsher 	unsigned int svr = mfspr(SPRN_SVR);
923ec21e2ecSJeff Kirsher 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
924ec21e2ecSJeff Kirsher 	unsigned int rev = svr & 0xffff;
925ec21e2ecSJeff Kirsher 
926ec21e2ecSJeff Kirsher 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
927ec21e2ecSJeff Kirsher 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
928ec21e2ecSJeff Kirsher 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
929ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_74;
930ec21e2ecSJeff Kirsher 
931ec21e2ecSJeff Kirsher 	/* MPC8313 and MPC837x all rev */
932ec21e2ecSJeff Kirsher 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
933ec21e2ecSJeff Kirsher 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
934ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_76;
935ec21e2ecSJeff Kirsher 
9362969b1f7SClaudiu Manoil 	/* MPC8313 Rev < 2.0 */
9372969b1f7SClaudiu Manoil 	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
938ec21e2ecSJeff Kirsher 		priv->errata |= GFAR_ERRATA_12;
9392969b1f7SClaudiu Manoil }
9402969b1f7SClaudiu Manoil 
9412969b1f7SClaudiu Manoil static void __gfar_detect_errata_85xx(struct gfar_private *priv)
9422969b1f7SClaudiu Manoil {
9432969b1f7SClaudiu Manoil 	unsigned int svr = mfspr(SPRN_SVR);
9442969b1f7SClaudiu Manoil 
9452969b1f7SClaudiu Manoil 	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
9462969b1f7SClaudiu Manoil 		priv->errata |= GFAR_ERRATA_12;
9477bfc6082SAtsushi Nemoto 	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
94853fad773SClaudiu Manoil 	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
9497bfc6082SAtsushi Nemoto 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
9507bfc6082SAtsushi Nemoto 	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
95153fad773SClaudiu Manoil 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
9522969b1f7SClaudiu Manoil }
953d6ef0bccSClaudiu Manoil #endif
9542969b1f7SClaudiu Manoil 
9552969b1f7SClaudiu Manoil static void gfar_detect_errata(struct gfar_private *priv)
9562969b1f7SClaudiu Manoil {
9572969b1f7SClaudiu Manoil 	struct device *dev = &priv->ofdev->dev;
9582969b1f7SClaudiu Manoil 
9592969b1f7SClaudiu Manoil 	/* no plans to fix */
9602969b1f7SClaudiu Manoil 	priv->errata |= GFAR_ERRATA_A002;
9612969b1f7SClaudiu Manoil 
962d6ef0bccSClaudiu Manoil #ifdef CONFIG_PPC
9632969b1f7SClaudiu Manoil 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
9642969b1f7SClaudiu Manoil 		__gfar_detect_errata_85xx(priv);
9652969b1f7SClaudiu Manoil 	else /* non-mpc85xx parts, i.e. e300 core based */
9662969b1f7SClaudiu Manoil 		__gfar_detect_errata_83xx(priv);
967d6ef0bccSClaudiu Manoil #endif
968ec21e2ecSJeff Kirsher 
969ec21e2ecSJeff Kirsher 	if (priv->errata)
970ec21e2ecSJeff Kirsher 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
971ec21e2ecSJeff Kirsher 			 priv->errata);
972ec21e2ecSJeff Kirsher }
973ec21e2ecSJeff Kirsher 
974898157edSXiubo Li static void gfar_init_addr_hash_table(struct gfar_private *priv)
97520862788SClaudiu Manoil {
97620862788SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
977ec21e2ecSJeff Kirsher 
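	/* The extended hash uses 16 registers x 32 bits = a 512-entry table
	 * (hash width 9); the basic hash uses 8 x 32 bits = 256 entries
	 * (width 8).
	 */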
978ec21e2ecSJeff Kirsher 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
979ec21e2ecSJeff Kirsher 		priv->extended_hash = 1;
980ec21e2ecSJeff Kirsher 		priv->hash_width = 9;
981ec21e2ecSJeff Kirsher 
982ec21e2ecSJeff Kirsher 		priv->hash_regs[0] = &regs->igaddr0;
983ec21e2ecSJeff Kirsher 		priv->hash_regs[1] = &regs->igaddr1;
984ec21e2ecSJeff Kirsher 		priv->hash_regs[2] = &regs->igaddr2;
985ec21e2ecSJeff Kirsher 		priv->hash_regs[3] = &regs->igaddr3;
986ec21e2ecSJeff Kirsher 		priv->hash_regs[4] = &regs->igaddr4;
987ec21e2ecSJeff Kirsher 		priv->hash_regs[5] = &regs->igaddr5;
988ec21e2ecSJeff Kirsher 		priv->hash_regs[6] = &regs->igaddr6;
989ec21e2ecSJeff Kirsher 		priv->hash_regs[7] = &regs->igaddr7;
990ec21e2ecSJeff Kirsher 		priv->hash_regs[8] = &regs->gaddr0;
991ec21e2ecSJeff Kirsher 		priv->hash_regs[9] = &regs->gaddr1;
992ec21e2ecSJeff Kirsher 		priv->hash_regs[10] = &regs->gaddr2;
993ec21e2ecSJeff Kirsher 		priv->hash_regs[11] = &regs->gaddr3;
994ec21e2ecSJeff Kirsher 		priv->hash_regs[12] = &regs->gaddr4;
995ec21e2ecSJeff Kirsher 		priv->hash_regs[13] = &regs->gaddr5;
996ec21e2ecSJeff Kirsher 		priv->hash_regs[14] = &regs->gaddr6;
997ec21e2ecSJeff Kirsher 		priv->hash_regs[15] = &regs->gaddr7;
998ec21e2ecSJeff Kirsher 
999ec21e2ecSJeff Kirsher 	} else {
1000ec21e2ecSJeff Kirsher 		priv->extended_hash = 0;
1001ec21e2ecSJeff Kirsher 		priv->hash_width = 8;
1002ec21e2ecSJeff Kirsher 
1003ec21e2ecSJeff Kirsher 		priv->hash_regs[0] = &regs->gaddr0;
1004ec21e2ecSJeff Kirsher 		priv->hash_regs[1] = &regs->gaddr1;
1005ec21e2ecSJeff Kirsher 		priv->hash_regs[2] = &regs->gaddr2;
1006ec21e2ecSJeff Kirsher 		priv->hash_regs[3] = &regs->gaddr3;
1007ec21e2ecSJeff Kirsher 		priv->hash_regs[4] = &regs->gaddr4;
1008ec21e2ecSJeff Kirsher 		priv->hash_regs[5] = &regs->gaddr5;
1009ec21e2ecSJeff Kirsher 		priv->hash_regs[6] = &regs->gaddr6;
1010ec21e2ecSJeff Kirsher 		priv->hash_regs[7] = &regs->gaddr7;
1011ec21e2ecSJeff Kirsher 	}
101220862788SClaudiu Manoil }
101320862788SClaudiu Manoil 
1014ec21e2ecSJeff Kirsher static int __gfar_is_rx_idle(struct gfar_private *priv)
1015ec21e2ecSJeff Kirsher {
1016ec21e2ecSJeff Kirsher 	u32 res;
1017ec21e2ecSJeff Kirsher 
10180977f817SJan Ceuleers 	/* Normally TSEC should not hang on GRS commands, so we should
1019ec21e2ecSJeff Kirsher 	 * actually wait for IEVENT_GRSC flag.
1020ec21e2ecSJeff Kirsher 	 */
1021ad3660c2SClaudiu Manoil 	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1022ec21e2ecSJeff Kirsher 		return 0;
1023ec21e2ecSJeff Kirsher 
10240977f817SJan Ceuleers 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1025ec21e2ecSJeff Kirsher 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1026ec21e2ecSJeff Kirsher 	 * and the Rx can be safely reset.
1027ec21e2ecSJeff Kirsher 	 */
1028ec21e2ecSJeff Kirsher 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1029ec21e2ecSJeff Kirsher 	res &= 0x7f807f80;
1030ec21e2ecSJeff Kirsher 	if ((res & 0xffff) == (res >> 16))
1031ec21e2ecSJeff Kirsher 		return 1;
1032ec21e2ecSJeff Kirsher 
1033ec21e2ecSJeff Kirsher 	return 0;
1034ec21e2ecSJeff Kirsher }
1035ec21e2ecSJeff Kirsher 
1036ec21e2ecSJeff Kirsher /* Halt the receive and transmit queues */
1037c10650b6SClaudiu Manoil static void gfar_halt_nodisable(struct gfar_private *priv)
1038ec21e2ecSJeff Kirsher {
1039efeddce7SClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1040ec21e2ecSJeff Kirsher 	u32 tempval;
1041a4feee89SClaudiu Manoil 	unsigned int timeout;
1042a4feee89SClaudiu Manoil 	int stopped;
1043ec21e2ecSJeff Kirsher 
1044efeddce7SClaudiu Manoil 	gfar_ints_disable(priv);
1045ec21e2ecSJeff Kirsher 
1046a4feee89SClaudiu Manoil 	if (gfar_is_dma_stopped(priv))
1047a4feee89SClaudiu Manoil 		return;
1048a4feee89SClaudiu Manoil 
1049ec21e2ecSJeff Kirsher 	/* Stop the DMA, and wait for it to stop */
1050ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1051ec21e2ecSJeff Kirsher 	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1052ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
1053ec21e2ecSJeff Kirsher 
1054a4feee89SClaudiu Manoil retry:
1055a4feee89SClaudiu Manoil 	timeout = 1000;
1056a4feee89SClaudiu Manoil 	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1057a4feee89SClaudiu Manoil 		cpu_relax();
1058a4feee89SClaudiu Manoil 		timeout--;
1059ec21e2ecSJeff Kirsher 	}
1060a4feee89SClaudiu Manoil 
1061a4feee89SClaudiu Manoil 	if (!timeout)
1062a4feee89SClaudiu Manoil 		stopped = gfar_is_dma_stopped(priv);
1063a4feee89SClaudiu Manoil 
1064a4feee89SClaudiu Manoil 	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1065a4feee89SClaudiu Manoil 	    !__gfar_is_rx_idle(priv))
1066a4feee89SClaudiu Manoil 		goto retry;
1067ec21e2ecSJeff Kirsher }
1068ec21e2ecSJeff Kirsher 
1069ec21e2ecSJeff Kirsher /* Halt the receive and transmit queues */
10707ad38784SArseny Solokha static void gfar_halt(struct gfar_private *priv)
1071ec21e2ecSJeff Kirsher {
1072ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1073ec21e2ecSJeff Kirsher 	u32 tempval;
1074ec21e2ecSJeff Kirsher 
1075c10650b6SClaudiu Manoil 	/* Disable the Rx/Tx hw queues */
1076c10650b6SClaudiu Manoil 	gfar_write(&regs->rqueue, 0);
1077c10650b6SClaudiu Manoil 	gfar_write(&regs->tqueue, 0);
1078ec21e2ecSJeff Kirsher 
1079c10650b6SClaudiu Manoil 	mdelay(10);
1080c10650b6SClaudiu Manoil 
1081c10650b6SClaudiu Manoil 	gfar_halt_nodisable(priv);
1082c10650b6SClaudiu Manoil 
1083c10650b6SClaudiu Manoil 	/* Disable Rx/Tx DMA */
1084ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->maccfg1);
1085ec21e2ecSJeff Kirsher 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1086ec21e2ecSJeff Kirsher 	gfar_write(&regs->maccfg1, tempval);
1087ec21e2ecSJeff Kirsher }
1088ec21e2ecSJeff Kirsher 
1089ec21e2ecSJeff Kirsher static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1090ec21e2ecSJeff Kirsher {
1091ec21e2ecSJeff Kirsher 	struct txbd8 *txbdp;
1092ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1093ec21e2ecSJeff Kirsher 	int i, j;
1094ec21e2ecSJeff Kirsher 
1095ec21e2ecSJeff Kirsher 	txbdp = tx_queue->tx_bd_base;
1096ec21e2ecSJeff Kirsher 
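	/* Unmap every pending descriptor and free its skb: the first BD of a
	 * frame was mapped with dma_map_single(), the fragment BDs with
	 * skb_frag_dma_map(), hence the matching unmap calls below.
	 */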
1097ec21e2ecSJeff Kirsher 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1098ec21e2ecSJeff Kirsher 		if (!tx_queue->tx_skbuff[i])
1099ec21e2ecSJeff Kirsher 			continue;
1100ec21e2ecSJeff Kirsher 
1101a7312d58SClaudiu Manoil 		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1102a7312d58SClaudiu Manoil 				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1103ec21e2ecSJeff Kirsher 		txbdp->lstatus = 0;
1104ec21e2ecSJeff Kirsher 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1105ec21e2ecSJeff Kirsher 		     j++) {
1106ec21e2ecSJeff Kirsher 			txbdp++;
1107a7312d58SClaudiu Manoil 			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1108a7312d58SClaudiu Manoil 				       be16_to_cpu(txbdp->length),
1109a7312d58SClaudiu Manoil 				       DMA_TO_DEVICE);
1110ec21e2ecSJeff Kirsher 		}
1111ec21e2ecSJeff Kirsher 		txbdp++;
1112ec21e2ecSJeff Kirsher 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1113ec21e2ecSJeff Kirsher 		tx_queue->tx_skbuff[i] = NULL;
1114ec21e2ecSJeff Kirsher 	}
1115ec21e2ecSJeff Kirsher 	kfree(tx_queue->tx_skbuff);
11161eb8f7a7SClaudiu Manoil 	tx_queue->tx_skbuff = NULL;
1117ec21e2ecSJeff Kirsher }
1118ec21e2ecSJeff Kirsher 
1119ec21e2ecSJeff Kirsher static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1120ec21e2ecSJeff Kirsher {
1121ec21e2ecSJeff Kirsher 	int i;
1122ec21e2ecSJeff Kirsher 
112375354148SClaudiu Manoil 	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
112475354148SClaudiu Manoil 
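	/* free the Rx skb still being assembled across NAPI polls, if any */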
112575354148SClaudiu Manoil 	dev_kfree_skb(rx_queue->skb);
1126ec21e2ecSJeff Kirsher 
1127ec21e2ecSJeff Kirsher 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
112875354148SClaudiu Manoil 		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
112975354148SClaudiu Manoil 
1130ec21e2ecSJeff Kirsher 		rxbdp->lstatus = 0;
1131ec21e2ecSJeff Kirsher 		rxbdp->bufPtr = 0;
1132ec21e2ecSJeff Kirsher 		rxbdp++;
113375354148SClaudiu Manoil 
113475354148SClaudiu Manoil 		if (!rxb->page)
113575354148SClaudiu Manoil 			continue;
113675354148SClaudiu Manoil 
11374af0e5bbSArseny Solokha 		dma_unmap_page(rx_queue->dev, rxb->dma,
113875354148SClaudiu Manoil 			       PAGE_SIZE, DMA_FROM_DEVICE);
113975354148SClaudiu Manoil 		__free_page(rxb->page);
114075354148SClaudiu Manoil 
114175354148SClaudiu Manoil 		rxb->page = NULL;
1142ec21e2ecSJeff Kirsher 	}
114375354148SClaudiu Manoil 
114475354148SClaudiu Manoil 	kfree(rx_queue->rx_buff);
114575354148SClaudiu Manoil 	rx_queue->rx_buff = NULL;
1146ec21e2ecSJeff Kirsher }
1147ec21e2ecSJeff Kirsher 
1148ec21e2ecSJeff Kirsher /* If there are any tx skbs or rx skbs still around, free them.
11490977f817SJan Ceuleers  * Then free tx_skbuff and rx_skbuff
11500977f817SJan Ceuleers  */
1151ec21e2ecSJeff Kirsher static void free_skb_resources(struct gfar_private *priv)
1152ec21e2ecSJeff Kirsher {
1153ec21e2ecSJeff Kirsher 	struct gfar_priv_tx_q *tx_queue = NULL;
1154ec21e2ecSJeff Kirsher 	struct gfar_priv_rx_q *rx_queue = NULL;
1155ec21e2ecSJeff Kirsher 	int i;
1156ec21e2ecSJeff Kirsher 
1157ec21e2ecSJeff Kirsher 	/* Go through all the buffer descriptors and free their data buffers */
1158ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_tx_queues; i++) {
1159d8a0f1b0SPaul Gortmaker 		struct netdev_queue *txq;
1160bc4598bcSJan Ceuleers 
1161ec21e2ecSJeff Kirsher 		tx_queue = priv->tx_queue[i];
1162d8a0f1b0SPaul Gortmaker 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1163ec21e2ecSJeff Kirsher 		if (tx_queue->tx_skbuff)
1164ec21e2ecSJeff Kirsher 			free_skb_tx_queue(tx_queue);
1165d8a0f1b0SPaul Gortmaker 		netdev_tx_reset_queue(txq);
1166ec21e2ecSJeff Kirsher 	}
1167ec21e2ecSJeff Kirsher 
1168ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_rx_queues; i++) {
1169ec21e2ecSJeff Kirsher 		rx_queue = priv->rx_queue[i];
117075354148SClaudiu Manoil 		if (rx_queue->rx_buff)
1171ec21e2ecSJeff Kirsher 			free_skb_rx_queue(rx_queue);
1172ec21e2ecSJeff Kirsher 	}
1173ec21e2ecSJeff Kirsher 
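	/* The Tx and Rx BD rings share one coherent allocation
	 * (see gfar_alloc_skb_resources()), so release them with a single call.
	 */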
1174369ec162SClaudiu Manoil 	dma_free_coherent(priv->dev,
1175ec21e2ecSJeff Kirsher 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1176ec21e2ecSJeff Kirsher 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1177ec21e2ecSJeff Kirsher 			  priv->tx_queue[0]->tx_bd_base,
1178ec21e2ecSJeff Kirsher 			  priv->tx_queue[0]->tx_bd_dma_base);
1179ec21e2ecSJeff Kirsher }
1180ec21e2ecSJeff Kirsher 
11817d993c5fSArseny Solokha void stop_gfar(struct net_device *dev)
11827d993c5fSArseny Solokha {
11837d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
11847d993c5fSArseny Solokha 
11857d993c5fSArseny Solokha 	netif_tx_stop_all_queues(dev);
11867d993c5fSArseny Solokha 
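	/* tell the rest of the driver the interface is going down */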
11877d993c5fSArseny Solokha 	smp_mb__before_atomic();
11887d993c5fSArseny Solokha 	set_bit(GFAR_DOWN, &priv->state);
11897d993c5fSArseny Solokha 	smp_mb__after_atomic();
11907d993c5fSArseny Solokha 
11917d993c5fSArseny Solokha 	disable_napi(priv);
11927d993c5fSArseny Solokha 
11937d993c5fSArseny Solokha 	/* disable ints and gracefully shut down Rx/Tx DMA */
11947d993c5fSArseny Solokha 	gfar_halt(priv);
11957d993c5fSArseny Solokha 
11967d993c5fSArseny Solokha 	phy_stop(dev->phydev);
11977d993c5fSArseny Solokha 
11987d993c5fSArseny Solokha 	free_skb_resources(priv);
11997d993c5fSArseny Solokha }
12007d993c5fSArseny Solokha 
12017ad38784SArseny Solokha static void gfar_start(struct gfar_private *priv)
1202ec21e2ecSJeff Kirsher {
1203ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1204ec21e2ecSJeff Kirsher 	u32 tempval;
1205ec21e2ecSJeff Kirsher 	int i = 0;
1206ec21e2ecSJeff Kirsher 
1207c10650b6SClaudiu Manoil 	/* Enable Rx/Tx hw queues */
1208c10650b6SClaudiu Manoil 	gfar_write(&regs->rqueue, priv->rqueue);
1209c10650b6SClaudiu Manoil 	gfar_write(&regs->tqueue, priv->tqueue);
1210ec21e2ecSJeff Kirsher 
1211ec21e2ecSJeff Kirsher 	/* Initialize DMACTRL to have WWR and WOP */
1212ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1213ec21e2ecSJeff Kirsher 	tempval |= DMACTRL_INIT_SETTINGS;
1214ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
1215ec21e2ecSJeff Kirsher 
1216ec21e2ecSJeff Kirsher 	/* Make sure we aren't stopped */
1217ec21e2ecSJeff Kirsher 	tempval = gfar_read(&regs->dmactrl);
1218ec21e2ecSJeff Kirsher 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1219ec21e2ecSJeff Kirsher 	gfar_write(&regs->dmactrl, tempval);
1220ec21e2ecSJeff Kirsher 
1221ec21e2ecSJeff Kirsher 	for (i = 0; i < priv->num_grps; i++) {
1222ec21e2ecSJeff Kirsher 		regs = priv->gfargrp[i].regs;
1223ec21e2ecSJeff Kirsher 		/* Clear THLT/RHLT, so that the DMA starts polling now */
1224ec21e2ecSJeff Kirsher 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1225ec21e2ecSJeff Kirsher 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1226ec21e2ecSJeff Kirsher 	}
1227ec21e2ecSJeff Kirsher 
1228c10650b6SClaudiu Manoil 	/* Enable Rx/Tx DMA */
1229c10650b6SClaudiu Manoil 	tempval = gfar_read(&regs->maccfg1);
1230c10650b6SClaudiu Manoil 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1231c10650b6SClaudiu Manoil 	gfar_write(&regs->maccfg1, tempval);
1232c10650b6SClaudiu Manoil 
1233efeddce7SClaudiu Manoil 	gfar_ints_enable(priv);
1234efeddce7SClaudiu Manoil 
1235860e9538SFlorian Westphal 	netif_trans_update(priv->ndev); /* prevent tx timeout */
1236ec21e2ecSJeff Kirsher }
1237ec21e2ecSJeff Kirsher 
12387d993c5fSArseny Solokha static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
123980ec396cSClaudiu Manoil {
12407d993c5fSArseny Solokha 	struct page *page;
12417d993c5fSArseny Solokha 	dma_addr_t addr;
12427d993c5fSArseny Solokha 
12437d993c5fSArseny Solokha 	page = dev_alloc_page();
12447d993c5fSArseny Solokha 	if (unlikely(!page))
12457d993c5fSArseny Solokha 		return false;
12467d993c5fSArseny Solokha 
12477d993c5fSArseny Solokha 	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
12487d993c5fSArseny Solokha 	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
12497d993c5fSArseny Solokha 		__free_page(page);
12507d993c5fSArseny Solokha 
12517d993c5fSArseny Solokha 		return false;
125280ec396cSClaudiu Manoil 	}
125380ec396cSClaudiu Manoil 
12547d993c5fSArseny Solokha 	rxb->dma = addr;
12557d993c5fSArseny Solokha 	rxb->page = page;
12567d993c5fSArseny Solokha 	rxb->page_offset = 0;
12577d993c5fSArseny Solokha 
12587d993c5fSArseny Solokha 	return true;
12597d993c5fSArseny Solokha }
12607d993c5fSArseny Solokha 
12617d993c5fSArseny Solokha static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1262ec21e2ecSJeff Kirsher {
12637d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
12647d993c5fSArseny Solokha 	struct gfar_extra_stats *estats = &priv->extra_stats;
1265ec21e2ecSJeff Kirsher 
12667d993c5fSArseny Solokha 	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
12677d993c5fSArseny Solokha 	atomic64_inc(&estats->rx_alloc_err);
1268ec21e2ecSJeff Kirsher }
1269ec21e2ecSJeff Kirsher 
12707d993c5fSArseny Solokha static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
12717d993c5fSArseny Solokha 				int alloc_cnt)
127280ec396cSClaudiu Manoil {
12737d993c5fSArseny Solokha 	struct rxbd8 *bdp;
12747d993c5fSArseny Solokha 	struct gfar_rx_buff *rxb;
127580ec396cSClaudiu Manoil 	int i;
127680ec396cSClaudiu Manoil 
12777d993c5fSArseny Solokha 	i = rx_queue->next_to_use;
12787d993c5fSArseny Solokha 	bdp = &rx_queue->rx_bd_base[i];
12797d993c5fSArseny Solokha 	rxb = &rx_queue->rx_buff[i];
12807d993c5fSArseny Solokha 
12817d993c5fSArseny Solokha 	while (alloc_cnt--) {
12827d993c5fSArseny Solokha 		/* try to reuse the page */
12837d993c5fSArseny Solokha 		if (unlikely(!rxb->page)) {
12847d993c5fSArseny Solokha 			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
12857d993c5fSArseny Solokha 				gfar_rx_alloc_err(rx_queue);
12867d993c5fSArseny Solokha 				break;
128780ec396cSClaudiu Manoil 			}
128880ec396cSClaudiu Manoil 		}
128980ec396cSClaudiu Manoil 
12907d993c5fSArseny Solokha 		/* Setup the new RxBD */
12917d993c5fSArseny Solokha 		gfar_init_rxbdp(rx_queue, bdp,
12927d993c5fSArseny Solokha 				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
12937d993c5fSArseny Solokha 
12947d993c5fSArseny Solokha 		/* Update to the next pointer */
12957d993c5fSArseny Solokha 		bdp++;
12967d993c5fSArseny Solokha 		rxb++;
12977d993c5fSArseny Solokha 
12987d993c5fSArseny Solokha 		if (unlikely(++i == rx_queue->rx_ring_size)) {
12997d993c5fSArseny Solokha 			i = 0;
13007d993c5fSArseny Solokha 			bdp = rx_queue->rx_bd_base;
13017d993c5fSArseny Solokha 			rxb = rx_queue->rx_buff;
13027d993c5fSArseny Solokha 		}
13037d993c5fSArseny Solokha 	}
13047d993c5fSArseny Solokha 
13057d993c5fSArseny Solokha 	rx_queue->next_to_use = i;
13067d993c5fSArseny Solokha 	rx_queue->next_to_alloc = i;
13077d993c5fSArseny Solokha }
13087d993c5fSArseny Solokha 
13097d993c5fSArseny Solokha static void gfar_init_bds(struct net_device *ndev)
131080ec396cSClaudiu Manoil {
13117d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(ndev);
13127d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
13137d993c5fSArseny Solokha 	struct gfar_priv_tx_q *tx_queue = NULL;
13147d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
13157d993c5fSArseny Solokha 	struct txbd8 *txbdp;
13167d993c5fSArseny Solokha 	u32 __iomem *rfbptr;
13177d993c5fSArseny Solokha 	int i, j;
131880ec396cSClaudiu Manoil 
13197d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
13207d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
13217d993c5fSArseny Solokha 		/* Initialize some variables in our dev structure */
13227d993c5fSArseny Solokha 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
13237d993c5fSArseny Solokha 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
13247d993c5fSArseny Solokha 		tx_queue->cur_tx = tx_queue->tx_bd_base;
13257d993c5fSArseny Solokha 		tx_queue->skb_curtx = 0;
13267d993c5fSArseny Solokha 		tx_queue->skb_dirtytx = 0;
13277d993c5fSArseny Solokha 
13287d993c5fSArseny Solokha 		/* Initialize Transmit Descriptor Ring */
13297d993c5fSArseny Solokha 		txbdp = tx_queue->tx_bd_base;
13307d993c5fSArseny Solokha 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
13317d993c5fSArseny Solokha 			txbdp->lstatus = 0;
13327d993c5fSArseny Solokha 			txbdp->bufPtr = 0;
13337d993c5fSArseny Solokha 			txbdp++;
13347d993c5fSArseny Solokha 		}
13357d993c5fSArseny Solokha 
13367d993c5fSArseny Solokha 		/* Set the last descriptor in the ring to indicate wrap */
13377d993c5fSArseny Solokha 		txbdp--;
13387d993c5fSArseny Solokha 		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
13397d993c5fSArseny Solokha 					    TXBD_WRAP);
13407d993c5fSArseny Solokha 	}
13417d993c5fSArseny Solokha 
13427d993c5fSArseny Solokha 	rfbptr = &regs->rfbptr0;
13437d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
13447d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
13457d993c5fSArseny Solokha 
13467d993c5fSArseny Solokha 		rx_queue->next_to_clean = 0;
13477d993c5fSArseny Solokha 		rx_queue->next_to_use = 0;
13487d993c5fSArseny Solokha 		rx_queue->next_to_alloc = 0;
13497d993c5fSArseny Solokha 
13507d993c5fSArseny Solokha 		/* make sure next_to_clean != next_to_use after this
13517d993c5fSArseny Solokha 		 * by leaving at least 1 unused descriptor
13527d993c5fSArseny Solokha 		 */
13537d993c5fSArseny Solokha 		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
13547d993c5fSArseny Solokha 
13557d993c5fSArseny Solokha 		rx_queue->rfbptr = rfbptr;
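		/* the per-queue rfbptr registers are 8 bytes apart, hence the
		 * stride of two u32s
		 */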
13567d993c5fSArseny Solokha 		rfbptr += 2;
135780ec396cSClaudiu Manoil 	}
135880ec396cSClaudiu Manoil }
135980ec396cSClaudiu Manoil 
13607d993c5fSArseny Solokha static int gfar_alloc_skb_resources(struct net_device *ndev)
13617d993c5fSArseny Solokha {
13627d993c5fSArseny Solokha 	void *vaddr;
13637d993c5fSArseny Solokha 	dma_addr_t addr;
13647d993c5fSArseny Solokha 	int i, j;
13657d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(ndev);
13667d993c5fSArseny Solokha 	struct device *dev = priv->dev;
13677d993c5fSArseny Solokha 	struct gfar_priv_tx_q *tx_queue = NULL;
13687d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
13697d993c5fSArseny Solokha 
13707d993c5fSArseny Solokha 	priv->total_tx_ring_size = 0;
13717d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++)
13727d993c5fSArseny Solokha 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
13737d993c5fSArseny Solokha 
13747d993c5fSArseny Solokha 	priv->total_rx_ring_size = 0;
13757d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++)
13767d993c5fSArseny Solokha 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
13777d993c5fSArseny Solokha 
13787d993c5fSArseny Solokha 	/* Allocate memory for the buffer descriptors */
13797d993c5fSArseny Solokha 	vaddr = dma_alloc_coherent(dev,
13807d993c5fSArseny Solokha 				   (priv->total_tx_ring_size *
13817d993c5fSArseny Solokha 				    sizeof(struct txbd8)) +
13827d993c5fSArseny Solokha 				   (priv->total_rx_ring_size *
13837d993c5fSArseny Solokha 				    sizeof(struct rxbd8)),
13847d993c5fSArseny Solokha 				   &addr, GFP_KERNEL);
13857d993c5fSArseny Solokha 	if (!vaddr)
13867d993c5fSArseny Solokha 		return -ENOMEM;
13877d993c5fSArseny Solokha 
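	/* Carve the block into per-queue Tx BD rings, followed by the Rx rings */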
13887d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
13897d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
13907d993c5fSArseny Solokha 		tx_queue->tx_bd_base = vaddr;
13917d993c5fSArseny Solokha 		tx_queue->tx_bd_dma_base = addr;
13927d993c5fSArseny Solokha 		tx_queue->dev = ndev;
13937d993c5fSArseny Solokha 		/* enet DMA only understands physical addresses */
13947d993c5fSArseny Solokha 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
13957d993c5fSArseny Solokha 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
13967d993c5fSArseny Solokha 	}
13977d993c5fSArseny Solokha 
13987d993c5fSArseny Solokha 	/* Start the rx descriptor ring where the tx ring leaves off */
13997d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
14007d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
14017d993c5fSArseny Solokha 		rx_queue->rx_bd_base = vaddr;
14027d993c5fSArseny Solokha 		rx_queue->rx_bd_dma_base = addr;
14037d993c5fSArseny Solokha 		rx_queue->ndev = ndev;
14047d993c5fSArseny Solokha 		rx_queue->dev = dev;
14057d993c5fSArseny Solokha 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
14067d993c5fSArseny Solokha 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
14077d993c5fSArseny Solokha 	}
14087d993c5fSArseny Solokha 
14097d993c5fSArseny Solokha 	/* Setup the skbuff rings */
14107d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
14117d993c5fSArseny Solokha 		tx_queue = priv->tx_queue[i];
14127d993c5fSArseny Solokha 		tx_queue->tx_skbuff =
14137d993c5fSArseny Solokha 			kmalloc_array(tx_queue->tx_ring_size,
14147d993c5fSArseny Solokha 				      sizeof(*tx_queue->tx_skbuff),
14157d993c5fSArseny Solokha 				      GFP_KERNEL);
14167d993c5fSArseny Solokha 		if (!tx_queue->tx_skbuff)
14177d993c5fSArseny Solokha 			goto cleanup;
14187d993c5fSArseny Solokha 
14197d993c5fSArseny Solokha 		for (j = 0; j < tx_queue->tx_ring_size; j++)
14207d993c5fSArseny Solokha 			tx_queue->tx_skbuff[j] = NULL;
14217d993c5fSArseny Solokha 	}
14227d993c5fSArseny Solokha 
14237d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
14247d993c5fSArseny Solokha 		rx_queue = priv->rx_queue[i];
14257d993c5fSArseny Solokha 		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
14267d993c5fSArseny Solokha 					    sizeof(*rx_queue->rx_buff),
14277d993c5fSArseny Solokha 					    GFP_KERNEL);
14287d993c5fSArseny Solokha 		if (!rx_queue->rx_buff)
14297d993c5fSArseny Solokha 			goto cleanup;
14307d993c5fSArseny Solokha 	}
14317d993c5fSArseny Solokha 
14327d993c5fSArseny Solokha 	gfar_init_bds(ndev);
14337d993c5fSArseny Solokha 
143480ec396cSClaudiu Manoil 	return 0;
14357d993c5fSArseny Solokha 
14367d993c5fSArseny Solokha cleanup:
14377d993c5fSArseny Solokha 	free_skb_resources(priv);
14387d993c5fSArseny Solokha 	return -ENOMEM;
143980ec396cSClaudiu Manoil }
144080ec396cSClaudiu Manoil 
1441ec21e2ecSJeff Kirsher /* Bring the controller up and running */
1442ec21e2ecSJeff Kirsher int startup_gfar(struct net_device *ndev)
1443ec21e2ecSJeff Kirsher {
1444ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(ndev);
144580ec396cSClaudiu Manoil 	int err;
1446ec21e2ecSJeff Kirsher 
1447a328ac92SClaudiu Manoil 	gfar_mac_reset(priv);
1448ec21e2ecSJeff Kirsher 
1449ec21e2ecSJeff Kirsher 	err = gfar_alloc_skb_resources(ndev);
1450ec21e2ecSJeff Kirsher 	if (err)
1451ec21e2ecSJeff Kirsher 		return err;
1452ec21e2ecSJeff Kirsher 
1453a328ac92SClaudiu Manoil 	gfar_init_tx_rx_base(priv);
1454ec21e2ecSJeff Kirsher 
14554e857c58SPeter Zijlstra 	smp_mb__before_atomic();
14560851133bSClaudiu Manoil 	clear_bit(GFAR_DOWN, &priv->state);
14574e857c58SPeter Zijlstra 	smp_mb__after_atomic();
14580851133bSClaudiu Manoil 
14590851133bSClaudiu Manoil 	/* Start Rx/Tx DMA and enable the interrupts */
1460c10650b6SClaudiu Manoil 	gfar_start(priv);
1461ec21e2ecSJeff Kirsher 
14622a4eebf0SClaudiu Manoil 	/* force link state update after mac reset */
14632a4eebf0SClaudiu Manoil 	priv->oldlink = 0;
14642a4eebf0SClaudiu Manoil 	priv->oldspeed = 0;
14652a4eebf0SClaudiu Manoil 	priv->oldduplex = -1;
14662a4eebf0SClaudiu Manoil 
14674c4a6b0eSPhilippe Reynes 	phy_start(ndev->phydev);
1468ec21e2ecSJeff Kirsher 
14690851133bSClaudiu Manoil 	enable_napi(priv);
14700851133bSClaudiu Manoil 
14710851133bSClaudiu Manoil 	netif_tx_wake_all_queues(ndev);
14720851133bSClaudiu Manoil 
1473ec21e2ecSJeff Kirsher 	return 0;
1474ec21e2ecSJeff Kirsher }
1475ec21e2ecSJeff Kirsher 
14767d993c5fSArseny Solokha static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
14777d993c5fSArseny Solokha {
14787d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
14797d993c5fSArseny Solokha 	struct phy_device *phydev = ndev->phydev;
14807d993c5fSArseny Solokha 	u32 val = 0;
14817d993c5fSArseny Solokha 
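	/* pause-frame based flow control only applies in full duplex */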
14827d993c5fSArseny Solokha 	if (!phydev->duplex)
14837d993c5fSArseny Solokha 		return val;
14847d993c5fSArseny Solokha 
14857d993c5fSArseny Solokha 	if (!priv->pause_aneg_en) {
14867d993c5fSArseny Solokha 		if (priv->tx_pause_en)
14877d993c5fSArseny Solokha 			val |= MACCFG1_TX_FLOW;
14887d993c5fSArseny Solokha 		if (priv->rx_pause_en)
14897d993c5fSArseny Solokha 			val |= MACCFG1_RX_FLOW;
14907d993c5fSArseny Solokha 	} else {
14917d993c5fSArseny Solokha 		u16 lcl_adv, rmt_adv;
14927d993c5fSArseny Solokha 		u8 flowctrl;
14937d993c5fSArseny Solokha 		/* get link partner capabilities */
14947d993c5fSArseny Solokha 		rmt_adv = 0;
14957d993c5fSArseny Solokha 		if (phydev->pause)
14967d993c5fSArseny Solokha 			rmt_adv = LPA_PAUSE_CAP;
14977d993c5fSArseny Solokha 		if (phydev->asym_pause)
14987d993c5fSArseny Solokha 			rmt_adv |= LPA_PAUSE_ASYM;
14997d993c5fSArseny Solokha 
15007d993c5fSArseny Solokha 		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
15017d993c5fSArseny Solokha 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
15027d993c5fSArseny Solokha 		if (flowctrl & FLOW_CTRL_TX)
15037d993c5fSArseny Solokha 			val |= MACCFG1_TX_FLOW;
15047d993c5fSArseny Solokha 		if (flowctrl & FLOW_CTRL_RX)
15057d993c5fSArseny Solokha 			val |= MACCFG1_RX_FLOW;
15067d993c5fSArseny Solokha 	}
15077d993c5fSArseny Solokha 
15087d993c5fSArseny Solokha 	return val;
15097d993c5fSArseny Solokha }
15107d993c5fSArseny Solokha 
15117d993c5fSArseny Solokha static noinline void gfar_update_link_state(struct gfar_private *priv)
15127d993c5fSArseny Solokha {
15137d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
15147d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
15157d993c5fSArseny Solokha 	struct phy_device *phydev = ndev->phydev;
15167d993c5fSArseny Solokha 	struct gfar_priv_rx_q *rx_queue = NULL;
15177d993c5fSArseny Solokha 	int i;
15187d993c5fSArseny Solokha 
15197d993c5fSArseny Solokha 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
15207d993c5fSArseny Solokha 		return;
15217d993c5fSArseny Solokha 
15227d993c5fSArseny Solokha 	if (phydev->link) {
15237d993c5fSArseny Solokha 		u32 tempval1 = gfar_read(&regs->maccfg1);
15247d993c5fSArseny Solokha 		u32 tempval = gfar_read(&regs->maccfg2);
15257d993c5fSArseny Solokha 		u32 ecntrl = gfar_read(&regs->ecntrl);
15267d993c5fSArseny Solokha 		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
15277d993c5fSArseny Solokha 
15287d993c5fSArseny Solokha 		if (phydev->duplex != priv->oldduplex) {
15297d993c5fSArseny Solokha 			if (!(phydev->duplex))
15307d993c5fSArseny Solokha 				tempval &= ~(MACCFG2_FULL_DUPLEX);
15317d993c5fSArseny Solokha 			else
15327d993c5fSArseny Solokha 				tempval |= MACCFG2_FULL_DUPLEX;
15337d993c5fSArseny Solokha 
15347d993c5fSArseny Solokha 			priv->oldduplex = phydev->duplex;
15357d993c5fSArseny Solokha 		}
15367d993c5fSArseny Solokha 
15377d993c5fSArseny Solokha 		if (phydev->speed != priv->oldspeed) {
15387d993c5fSArseny Solokha 			switch (phydev->speed) {
15397d993c5fSArseny Solokha 			case 1000:
15407d993c5fSArseny Solokha 				tempval =
15417d993c5fSArseny Solokha 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
15427d993c5fSArseny Solokha 
15437d993c5fSArseny Solokha 				ecntrl &= ~(ECNTRL_R100);
15447d993c5fSArseny Solokha 				break;
15457d993c5fSArseny Solokha 			case 100:
15467d993c5fSArseny Solokha 			case 10:
15477d993c5fSArseny Solokha 				tempval =
15487d993c5fSArseny Solokha 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
15497d993c5fSArseny Solokha 
15507d993c5fSArseny Solokha 				/* Reduced mode distinguishes
15517d993c5fSArseny Solokha 				 * between 10 and 100
15520977f817SJan Ceuleers 				 */
15537d993c5fSArseny Solokha 				if (phydev->speed == SPEED_100)
15547d993c5fSArseny Solokha 					ecntrl |= ECNTRL_R100;
15557d993c5fSArseny Solokha 				else
15567d993c5fSArseny Solokha 					ecntrl &= ~(ECNTRL_R100);
15577d993c5fSArseny Solokha 				break;
15587d993c5fSArseny Solokha 			default:
15597d993c5fSArseny Solokha 				netif_warn(priv, link, priv->ndev,
15607d993c5fSArseny Solokha 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
15617d993c5fSArseny Solokha 					   phydev->speed);
15627d993c5fSArseny Solokha 				break;
15637d993c5fSArseny Solokha 			}
15647d993c5fSArseny Solokha 
15657d993c5fSArseny Solokha 			priv->oldspeed = phydev->speed;
15667d993c5fSArseny Solokha 		}
15677d993c5fSArseny Solokha 
15687d993c5fSArseny Solokha 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
15697d993c5fSArseny Solokha 		tempval1 |= gfar_get_flowctrl_cfg(priv);
15707d993c5fSArseny Solokha 
15717d993c5fSArseny Solokha 		/* Turn last free buffer recording on */
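		/* (the free buffer pointers let the controller throttle the
		 *  link partner with pause frames when Rx buffers run low)
		 */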
15727d993c5fSArseny Solokha 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
15737d993c5fSArseny Solokha 			for (i = 0; i < priv->num_rx_queues; i++) {
15747d993c5fSArseny Solokha 				u32 bdp_dma;
15757d993c5fSArseny Solokha 
15767d993c5fSArseny Solokha 				rx_queue = priv->rx_queue[i];
15777d993c5fSArseny Solokha 				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
15787d993c5fSArseny Solokha 				gfar_write(rx_queue->rfbptr, bdp_dma);
15797d993c5fSArseny Solokha 			}
15807d993c5fSArseny Solokha 
15817d993c5fSArseny Solokha 			priv->tx_actual_en = 1;
15827d993c5fSArseny Solokha 		}
15837d993c5fSArseny Solokha 
15847d993c5fSArseny Solokha 		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
15857d993c5fSArseny Solokha 			priv->tx_actual_en = 0;
15867d993c5fSArseny Solokha 
15877d993c5fSArseny Solokha 		gfar_write(&regs->maccfg1, tempval1);
15887d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
15897d993c5fSArseny Solokha 		gfar_write(&regs->ecntrl, ecntrl);
15907d993c5fSArseny Solokha 
15917d993c5fSArseny Solokha 		if (!priv->oldlink)
15927d993c5fSArseny Solokha 			priv->oldlink = 1;
15937d993c5fSArseny Solokha 
15947d993c5fSArseny Solokha 	} else if (priv->oldlink) {
15957d993c5fSArseny Solokha 		priv->oldlink = 0;
15967d993c5fSArseny Solokha 		priv->oldspeed = 0;
15977d993c5fSArseny Solokha 		priv->oldduplex = -1;
15987d993c5fSArseny Solokha 	}
15997d993c5fSArseny Solokha 
16007d993c5fSArseny Solokha 	if (netif_msg_link(priv))
16017d993c5fSArseny Solokha 		phy_print_status(phydev);
16027d993c5fSArseny Solokha }
16037d993c5fSArseny Solokha 
16047d993c5fSArseny Solokha /* Called every time the controller might need to be made
16057d993c5fSArseny Solokha  * aware of new link state.  The PHY code conveys this
16067d993c5fSArseny Solokha  * information through variables in the phydev structure, and this
16077d993c5fSArseny Solokha  * function converts those variables into the appropriate
16087d993c5fSArseny Solokha  * register values, and can bring down the device if needed.
16097d993c5fSArseny Solokha  */
16107d993c5fSArseny Solokha static void adjust_link(struct net_device *dev)
1611ec21e2ecSJeff Kirsher {
1612ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
16137d993c5fSArseny Solokha 	struct phy_device *phydev = dev->phydev;
1614ec21e2ecSJeff Kirsher 
16157d993c5fSArseny Solokha 	if (unlikely(phydev->link != priv->oldlink ||
16167d993c5fSArseny Solokha 		     (phydev->link && (phydev->duplex != priv->oldduplex ||
16177d993c5fSArseny Solokha 				       phydev->speed != priv->oldspeed))))
16187d993c5fSArseny Solokha 		gfar_update_link_state(priv);
16197d993c5fSArseny Solokha }
1620ec21e2ecSJeff Kirsher 
16217d993c5fSArseny Solokha /* Initialize TBI PHY interface for communicating with the
16227d993c5fSArseny Solokha  * SERDES lynx PHY on the chip.  We communicate with this PHY
16237d993c5fSArseny Solokha  * through the MDIO bus on each controller, treating it as a
16247d993c5fSArseny Solokha  * "normal" PHY at the address found in the TBIPA register.  We assume
16257d993c5fSArseny Solokha  * that the TBIPA register is valid.  Either the MDIO bus code will set
16267d993c5fSArseny Solokha  * it to a value that doesn't conflict with other PHYs on the bus, or the
16277d993c5fSArseny Solokha  * value doesn't matter, as there are no other PHYs on the bus.
16287d993c5fSArseny Solokha  */
16297d993c5fSArseny Solokha static void gfar_configure_serdes(struct net_device *dev)
16307d993c5fSArseny Solokha {
16317d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
16327d993c5fSArseny Solokha 	struct phy_device *tbiphy;
163380ec396cSClaudiu Manoil 
16347d993c5fSArseny Solokha 	if (!priv->tbi_node) {
16357d993c5fSArseny Solokha 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
16367d993c5fSArseny Solokha 				    "device tree specify a tbi-handle\n");
16377d993c5fSArseny Solokha 		return;
16387d993c5fSArseny Solokha 	}
1639ec21e2ecSJeff Kirsher 
16407d993c5fSArseny Solokha 	tbiphy = of_phy_find_device(priv->tbi_node);
16417d993c5fSArseny Solokha 	if (!tbiphy) {
16427d993c5fSArseny Solokha 		dev_err(&dev->dev, "error: Could not get TBI device\n");
16437d993c5fSArseny Solokha 		return;
16447d993c5fSArseny Solokha 	}
16457d993c5fSArseny Solokha 
16467d993c5fSArseny Solokha 	/* If the link is already up, we must already be ok, and don't need to
16477d993c5fSArseny Solokha 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
16487d993c5fSArseny Solokha 	 * everything for us?  Resetting it takes the link down and requires
16497d993c5fSArseny Solokha 	 * several seconds for it to come back.
16507d993c5fSArseny Solokha 	 */
16517d993c5fSArseny Solokha 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
16527d993c5fSArseny Solokha 		put_device(&tbiphy->mdio.dev);
16537d993c5fSArseny Solokha 		return;
16547d993c5fSArseny Solokha 	}
16557d993c5fSArseny Solokha 
16567d993c5fSArseny Solokha 	/* Single clk mode, mii mode off (for serdes communication) */
16577d993c5fSArseny Solokha 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
16587d993c5fSArseny Solokha 
16597d993c5fSArseny Solokha 	phy_write(tbiphy, MII_ADVERTISE,
16607d993c5fSArseny Solokha 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
16617d993c5fSArseny Solokha 		  ADVERTISE_1000XPSE_ASYM);
16627d993c5fSArseny Solokha 
16637d993c5fSArseny Solokha 	phy_write(tbiphy, MII_BMCR,
16647d993c5fSArseny Solokha 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
16657d993c5fSArseny Solokha 		  BMCR_SPEED1000);
16667d993c5fSArseny Solokha 
16677d993c5fSArseny Solokha 	put_device(&tbiphy->mdio.dev);
16687d993c5fSArseny Solokha }
16697d993c5fSArseny Solokha 
16707d993c5fSArseny Solokha /* Initializes driver's PHY state, and attaches to the PHY.
16717d993c5fSArseny Solokha  * Returns 0 on success.
16727d993c5fSArseny Solokha  */
16737d993c5fSArseny Solokha static int init_phy(struct net_device *dev)
16747d993c5fSArseny Solokha {
16757d993c5fSArseny Solokha 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
16767d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
16778e578e73SArseny Solokha 	phy_interface_t interface = priv->interface;
16787d993c5fSArseny Solokha 	struct phy_device *phydev;
16797d993c5fSArseny Solokha 	struct ethtool_eee edata;
16807d993c5fSArseny Solokha 
16817d993c5fSArseny Solokha 	linkmode_set_bit_array(phy_10_100_features_array,
16827d993c5fSArseny Solokha 			       ARRAY_SIZE(phy_10_100_features_array),
16837d993c5fSArseny Solokha 			       mask);
16847d993c5fSArseny Solokha 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
16857d993c5fSArseny Solokha 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
16867d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
16877d993c5fSArseny Solokha 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
16887d993c5fSArseny Solokha 
16897d993c5fSArseny Solokha 	priv->oldlink = 0;
16907d993c5fSArseny Solokha 	priv->oldspeed = 0;
16917d993c5fSArseny Solokha 	priv->oldduplex = -1;
16927d993c5fSArseny Solokha 
16937d993c5fSArseny Solokha 	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
16947d993c5fSArseny Solokha 				interface);
16957d993c5fSArseny Solokha 	if (!phydev) {
16967d993c5fSArseny Solokha 		dev_err(&dev->dev, "could not attach to PHY\n");
16977d993c5fSArseny Solokha 		return -ENODEV;
16987d993c5fSArseny Solokha 	}
16997d993c5fSArseny Solokha 
17007d993c5fSArseny Solokha 	if (interface == PHY_INTERFACE_MODE_SGMII)
17017d993c5fSArseny Solokha 		gfar_configure_serdes(dev);
17027d993c5fSArseny Solokha 
17037d993c5fSArseny Solokha 	/* Remove any features not supported by the controller */
17047d993c5fSArseny Solokha 	linkmode_and(phydev->supported, phydev->supported, mask);
17057d993c5fSArseny Solokha 	linkmode_copy(phydev->advertising, phydev->supported);
17067d993c5fSArseny Solokha 
17077d993c5fSArseny Solokha 	/* Add support for flow control */
17087d993c5fSArseny Solokha 	phy_support_asym_pause(phydev);
17097d993c5fSArseny Solokha 
17107d993c5fSArseny Solokha 	/* disable EEE autoneg, EEE not supported by eTSEC */
17117d993c5fSArseny Solokha 	memset(&edata, 0, sizeof(struct ethtool_eee));
17127d993c5fSArseny Solokha 	phy_ethtool_set_eee(phydev, &edata);
17137d993c5fSArseny Solokha 
17147d993c5fSArseny Solokha 	return 0;
1715ec21e2ecSJeff Kirsher }
1716ec21e2ecSJeff Kirsher 
1717ec21e2ecSJeff Kirsher static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1718ec21e2ecSJeff Kirsher {
1719d58ff351SJohannes Berg 	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1720ec21e2ecSJeff Kirsher 
1721ec21e2ecSJeff Kirsher 	memset(fcb, 0, GMAC_FCB_LEN);
1722ec21e2ecSJeff Kirsher 
1723ec21e2ecSJeff Kirsher 	return fcb;
1724ec21e2ecSJeff Kirsher }
1725ec21e2ecSJeff Kirsher 
17269c4886e5SManfred Rudigier static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
17279c4886e5SManfred Rudigier 				    int fcb_length)
1728ec21e2ecSJeff Kirsher {
1729ec21e2ecSJeff Kirsher 	/* If we're here, it's an IP packet with a TCP or UDP
1730ec21e2ecSJeff Kirsher 	 * payload.  We set it to checksum, using a pseudo-header
1731ec21e2ecSJeff Kirsher 	 * we provide
1732ec21e2ecSJeff Kirsher 	 */
17333a2e16c8SJan Ceuleers 	u8 flags = TXFCB_DEFAULT;
1734ec21e2ecSJeff Kirsher 
17350977f817SJan Ceuleers 	/* Tell the controller what the protocol is,
17360977f817SJan Ceuleers 	 * and provide the already calculated phcs
17370977f817SJan Ceuleers 	 */
1738ec21e2ecSJeff Kirsher 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1739ec21e2ecSJeff Kirsher 		flags |= TXFCB_UDP;
174026eb9374SClaudiu Manoil 		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1741ec21e2ecSJeff Kirsher 	} else
174226eb9374SClaudiu Manoil 		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1743ec21e2ecSJeff Kirsher 
1744ec21e2ecSJeff Kirsher 	/* l3os is the distance between the start of the
1745ec21e2ecSJeff Kirsher 	 * frame (skb->data) and the start of the IP hdr.
1746ec21e2ecSJeff Kirsher 	 * l4os is the distance between the start of the
17470977f817SJan Ceuleers 	 * l3 hdr and the l4 hdr
17480977f817SJan Ceuleers 	 */
174926eb9374SClaudiu Manoil 	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1750ec21e2ecSJeff Kirsher 	fcb->l4os = skb_network_header_len(skb);
1751ec21e2ecSJeff Kirsher 
1752ec21e2ecSJeff Kirsher 	fcb->flags = flags;
1753ec21e2ecSJeff Kirsher }
1754ec21e2ecSJeff Kirsher 
1755278af574SArnd Bergmann static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1756ec21e2ecSJeff Kirsher {
1757ec21e2ecSJeff Kirsher 	fcb->flags |= TXFCB_VLN;
175826eb9374SClaudiu Manoil 	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1759ec21e2ecSJeff Kirsher }
1760ec21e2ecSJeff Kirsher 
1761ec21e2ecSJeff Kirsher static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1762ec21e2ecSJeff Kirsher 				      struct txbd8 *base, int ring_size)
1763ec21e2ecSJeff Kirsher {
1764ec21e2ecSJeff Kirsher 	struct txbd8 *new_bd = bdp + stride;
1765ec21e2ecSJeff Kirsher 
1766ec21e2ecSJeff Kirsher 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1767ec21e2ecSJeff Kirsher }
1768ec21e2ecSJeff Kirsher 
1769ec21e2ecSJeff Kirsher static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1770ec21e2ecSJeff Kirsher 				      int ring_size)
1771ec21e2ecSJeff Kirsher {
1772ec21e2ecSJeff Kirsher 	return skip_txbd(bdp, 1, base, ring_size);
1773ec21e2ecSJeff Kirsher }
1774ec21e2ecSJeff Kirsher 
177502d88fb4SClaudiu Manoil /* eTSEC12: csum generation not supported for some fcb offsets */
177602d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_12(struct gfar_private *priv,
177702d88fb4SClaudiu Manoil 				       unsigned long fcb_addr)
177802d88fb4SClaudiu Manoil {
177902d88fb4SClaudiu Manoil 	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
178002d88fb4SClaudiu Manoil 	       (fcb_addr % 0x20) > 0x18);
178102d88fb4SClaudiu Manoil }
178202d88fb4SClaudiu Manoil 
178302d88fb4SClaudiu Manoil /* eTSEC76: csum generation for frames larger than 2500 may
178402d88fb4SClaudiu Manoil  * cause excess delays before start of transmission
178502d88fb4SClaudiu Manoil  */
178602d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_76(struct gfar_private *priv,
178702d88fb4SClaudiu Manoil 				       unsigned int len)
178802d88fb4SClaudiu Manoil {
178902d88fb4SClaudiu Manoil 	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
179002d88fb4SClaudiu Manoil 	       (len > 2500));
179102d88fb4SClaudiu Manoil }
179202d88fb4SClaudiu Manoil 
17930977f817SJan Ceuleers /* This is called by the kernel when a frame is ready for transmission.
17940977f817SJan Ceuleers  * It is pointed to by the dev->hard_start_xmit function pointer
17950977f817SJan Ceuleers  */
179606983aa5SYueHaibing static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1797ec21e2ecSJeff Kirsher {
1798ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
1799ec21e2ecSJeff Kirsher 	struct gfar_priv_tx_q *tx_queue = NULL;
1800ec21e2ecSJeff Kirsher 	struct netdev_queue *txq;
1801ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = NULL;
1802ec21e2ecSJeff Kirsher 	struct txfcb *fcb = NULL;
1803ec21e2ecSJeff Kirsher 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1804ec21e2ecSJeff Kirsher 	u32 lstatus;
180542f397adSClaudiu Manoil 	skb_frag_t *frag;
18060d0cffdcSClaudiu Manoil 	int i, rq = 0;
18070d0cffdcSClaudiu Manoil 	int do_tstamp, do_csum, do_vlan;
1808ec21e2ecSJeff Kirsher 	u32 bufaddr;
180950ad076bSClaudiu Manoil 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1810ec21e2ecSJeff Kirsher 
1811ec21e2ecSJeff Kirsher 	rq = skb->queue_mapping;
1812ec21e2ecSJeff Kirsher 	tx_queue = priv->tx_queue[rq];
1813ec21e2ecSJeff Kirsher 	txq = netdev_get_tx_queue(dev, rq);
1814ec21e2ecSJeff Kirsher 	base = tx_queue->tx_bd_base;
1815ec21e2ecSJeff Kirsher 	regs = tx_queue->grp->regs;
1816ec21e2ecSJeff Kirsher 
18170d0cffdcSClaudiu Manoil 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1818df8a39deSJiri Pirko 	do_vlan = skb_vlan_tag_present(skb);
18190d0cffdcSClaudiu Manoil 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
18200d0cffdcSClaudiu Manoil 		    priv->hwts_tx_en;
18210d0cffdcSClaudiu Manoil 
18220d0cffdcSClaudiu Manoil 	if (do_csum || do_vlan)
18230d0cffdcSClaudiu Manoil 		fcb_len = GMAC_FCB_LEN;
18240d0cffdcSClaudiu Manoil 
1825ec21e2ecSJeff Kirsher 	/* check if time stamp should be generated */
18260d0cffdcSClaudiu Manoil 	if (unlikely(do_tstamp))
18270d0cffdcSClaudiu Manoil 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1828ec21e2ecSJeff Kirsher 
1829ec21e2ecSJeff Kirsher 	/* make space for additional header when fcb is needed */
18300d0cffdcSClaudiu Manoil 	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
1831ec21e2ecSJeff Kirsher 		struct sk_buff *skb_new;
1832ec21e2ecSJeff Kirsher 
18330d0cffdcSClaudiu Manoil 		skb_new = skb_realloc_headroom(skb, fcb_len);
1834ec21e2ecSJeff Kirsher 		if (!skb_new) {
1835ec21e2ecSJeff Kirsher 			dev->stats.tx_errors++;
1836c9974ad4SEric W. Biederman 			dev_kfree_skb_any(skb);
1837ec21e2ecSJeff Kirsher 			return NETDEV_TX_OK;
1838ec21e2ecSJeff Kirsher 		}
1839db83d136SManfred Rudigier 
1840313b037cSEric Dumazet 		if (skb->sk)
1841313b037cSEric Dumazet 			skb_set_owner_w(skb_new, skb->sk);
1842c9974ad4SEric W. Biederman 		dev_consume_skb_any(skb);
1843ec21e2ecSJeff Kirsher 		skb = skb_new;
1844ec21e2ecSJeff Kirsher 	}
1845ec21e2ecSJeff Kirsher 
1846ec21e2ecSJeff Kirsher 	/* total number of fragments in the SKB */
1847ec21e2ecSJeff Kirsher 	nr_frags = skb_shinfo(skb)->nr_frags;
1848ec21e2ecSJeff Kirsher 
1849ec21e2ecSJeff Kirsher 	/* calculate the required number of TxBDs for this skb */
1850ec21e2ecSJeff Kirsher 	if (unlikely(do_tstamp))
1851ec21e2ecSJeff Kirsher 		nr_txbds = nr_frags + 2;
1852ec21e2ecSJeff Kirsher 	else
1853ec21e2ecSJeff Kirsher 		nr_txbds = nr_frags + 1;
1854ec21e2ecSJeff Kirsher 
1855ec21e2ecSJeff Kirsher 	/* check if there is space to queue this packet */
1856ec21e2ecSJeff Kirsher 	if (nr_txbds > tx_queue->num_txbdfree) {
1857ec21e2ecSJeff Kirsher 		/* no space, stop the queue */
1858ec21e2ecSJeff Kirsher 		netif_tx_stop_queue(txq);
1859ec21e2ecSJeff Kirsher 		dev->stats.tx_fifo_errors++;
1860ec21e2ecSJeff Kirsher 		return NETDEV_TX_BUSY;
1861ec21e2ecSJeff Kirsher 	}
1862ec21e2ecSJeff Kirsher 
1863ec21e2ecSJeff Kirsher 	/* Update transmit stats */
186450ad076bSClaudiu Manoil 	bytes_sent = skb->len;
186550ad076bSClaudiu Manoil 	tx_queue->stats.tx_bytes += bytes_sent;
186650ad076bSClaudiu Manoil 	/* keep Tx bytes on wire for BQL accounting */
186750ad076bSClaudiu Manoil 	GFAR_CB(skb)->bytes_sent = bytes_sent;
1868ec21e2ecSJeff Kirsher 	tx_queue->stats.tx_packets++;
1869ec21e2ecSJeff Kirsher 
1870ec21e2ecSJeff Kirsher 	txbdp = txbdp_start = tx_queue->cur_tx;
1871a7312d58SClaudiu Manoil 	lstatus = be32_to_cpu(txbdp->lstatus);
1872ec21e2ecSJeff Kirsher 
18739c4886e5SManfred Rudigier 	/* Add TxPAL between FCB and frame if required */
18749c4886e5SManfred Rudigier 	if (unlikely(do_tstamp)) {
18759c4886e5SManfred Rudigier 		skb_push(skb, GMAC_TXPAL_LEN);
18769c4886e5SManfred Rudigier 		memset(skb->data, 0, GMAC_TXPAL_LEN);
18779c4886e5SManfred Rudigier 	}
18789c4886e5SManfred Rudigier 
18790d0cffdcSClaudiu Manoil 	/* Add TxFCB if required */
18800d0cffdcSClaudiu Manoil 	if (fcb_len) {
1881ec21e2ecSJeff Kirsher 		fcb = gfar_add_fcb(skb);
1882ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_TOE);
18830d0cffdcSClaudiu Manoil 	}
18840d0cffdcSClaudiu Manoil 
18850d0cffdcSClaudiu Manoil 	/* Set up checksumming */
18860d0cffdcSClaudiu Manoil 	if (do_csum) {
18870d0cffdcSClaudiu Manoil 		gfar_tx_checksum(skb, fcb, fcb_len);
188802d88fb4SClaudiu Manoil 
188902d88fb4SClaudiu Manoil 		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
189002d88fb4SClaudiu Manoil 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
189102d88fb4SClaudiu Manoil 			__skb_pull(skb, GMAC_FCB_LEN);
189202d88fb4SClaudiu Manoil 			skb_checksum_help(skb);
18930d0cffdcSClaudiu Manoil 			if (do_vlan || do_tstamp) {
18940d0cffdcSClaudiu Manoil 				/* put back a new fcb for vlan/tstamp TOE */
18950d0cffdcSClaudiu Manoil 				fcb = gfar_add_fcb(skb);
18960d0cffdcSClaudiu Manoil 			} else {
18970d0cffdcSClaudiu Manoil 				/* Tx TOE not used */
189802d88fb4SClaudiu Manoil 				lstatus &= ~(BD_LFLAG(TXBD_TOE));
189902d88fb4SClaudiu Manoil 				fcb = NULL;
1900ec21e2ecSJeff Kirsher 			}
1901ec21e2ecSJeff Kirsher 		}
1902ec21e2ecSJeff Kirsher 	}
1903ec21e2ecSJeff Kirsher 
19040d0cffdcSClaudiu Manoil 	if (do_vlan)
1905ec21e2ecSJeff Kirsher 		gfar_tx_vlan(skb, fcb);
1906ec21e2ecSJeff Kirsher 
19070a4b5a24SKevin Hao 	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
19080a4b5a24SKevin Hao 				 DMA_TO_DEVICE);
19090a4b5a24SKevin Hao 	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
19100a4b5a24SKevin Hao 		goto dma_map_err;
19110a4b5a24SKevin Hao 
1912a7312d58SClaudiu Manoil 	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1913ec21e2ecSJeff Kirsher 
1914e19d0839SClaudiu Manoil 	/* Time stamp insertion requires one additional TxBD */
1915e19d0839SClaudiu Manoil 	if (unlikely(do_tstamp))
1916e19d0839SClaudiu Manoil 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1917e19d0839SClaudiu Manoil 						 tx_queue->tx_ring_size);
1918e19d0839SClaudiu Manoil 
191948963b44SClaudiu Manoil 	if (likely(!nr_frags)) {
19209c8b0778SYangbo Lu 		if (likely(!do_tstamp))
1921e19d0839SClaudiu Manoil 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1922e19d0839SClaudiu Manoil 	} else {
1923e19d0839SClaudiu Manoil 		u32 lstatus_start = lstatus;
1924e19d0839SClaudiu Manoil 
1925e19d0839SClaudiu Manoil 		/* Place the fragment addresses and lengths into the TxBDs */
192642f397adSClaudiu Manoil 		frag = &skb_shinfo(skb)->frags[0];
192742f397adSClaudiu Manoil 		for (i = 0; i < nr_frags; i++, frag++) {
192842f397adSClaudiu Manoil 			unsigned int size;
192942f397adSClaudiu Manoil 
1930e19d0839SClaudiu Manoil 			/* Point at the next BD, wrapping as needed */
1931e19d0839SClaudiu Manoil 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1932e19d0839SClaudiu Manoil 
193342f397adSClaudiu Manoil 			size = skb_frag_size(frag);
1934e19d0839SClaudiu Manoil 
193542f397adSClaudiu Manoil 			lstatus = be32_to_cpu(txbdp->lstatus) | size |
1936e19d0839SClaudiu Manoil 				  BD_LFLAG(TXBD_READY);
1937e19d0839SClaudiu Manoil 
1938e19d0839SClaudiu Manoil 			/* Handle the last BD specially */
1939e19d0839SClaudiu Manoil 			if (i == nr_frags - 1)
1940e19d0839SClaudiu Manoil 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1941e19d0839SClaudiu Manoil 
194242f397adSClaudiu Manoil 			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
194342f397adSClaudiu Manoil 						   size, DMA_TO_DEVICE);
1944e19d0839SClaudiu Manoil 			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1945e19d0839SClaudiu Manoil 				goto dma_map_err;
1946e19d0839SClaudiu Manoil 
1947e19d0839SClaudiu Manoil 			/* set the TxBD length and buffer pointer */
1948e19d0839SClaudiu Manoil 			txbdp->bufPtr = cpu_to_be32(bufaddr);
1949e19d0839SClaudiu Manoil 			txbdp->lstatus = cpu_to_be32(lstatus);
1950e19d0839SClaudiu Manoil 		}
1951e19d0839SClaudiu Manoil 
1952e19d0839SClaudiu Manoil 		lstatus = lstatus_start;
1953e19d0839SClaudiu Manoil 	}
1954e19d0839SClaudiu Manoil 
19550977f817SJan Ceuleers 	/* If time stamping is requested one additional TxBD must be set up. The
1956ec21e2ecSJeff Kirsher 	 * first TxBD points to the FCB and must have a data length of
1957ec21e2ecSJeff Kirsher 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1958ec21e2ecSJeff Kirsher 	 * the full frame length.
1959ec21e2ecSJeff Kirsher 	 */
1960ec21e2ecSJeff Kirsher 	if (unlikely(do_tstamp)) {
1961a7312d58SClaudiu Manoil 		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1962a7312d58SClaudiu Manoil 
1963a7312d58SClaudiu Manoil 		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1964a7312d58SClaudiu Manoil 		bufaddr += fcb_len;
196548963b44SClaudiu Manoil 
1966a7312d58SClaudiu Manoil 		lstatus_ts |= BD_LFLAG(TXBD_READY) |
19670d0cffdcSClaudiu Manoil 			      (skb_headlen(skb) - fcb_len);
196848963b44SClaudiu Manoil 		if (!nr_frags)
196948963b44SClaudiu Manoil 			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1970a7312d58SClaudiu Manoil 
1971a7312d58SClaudiu Manoil 		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1972a7312d58SClaudiu Manoil 		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1973ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1974e19d0839SClaudiu Manoil 
1975e19d0839SClaudiu Manoil 		/* Setup tx hardware time stamping */
1976e19d0839SClaudiu Manoil 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1977e19d0839SClaudiu Manoil 		fcb->ptp = 1;
1978ec21e2ecSJeff Kirsher 	} else {
1979ec21e2ecSJeff Kirsher 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1980ec21e2ecSJeff Kirsher 	}
1981ec21e2ecSJeff Kirsher 
198250ad076bSClaudiu Manoil 	netdev_tx_sent_queue(txq, bytes_sent);
1983d8a0f1b0SPaul Gortmaker 
1984d55398baSClaudiu Manoil 	gfar_wmb();
1985ec21e2ecSJeff Kirsher 
1986a7312d58SClaudiu Manoil 	txbdp_start->lstatus = cpu_to_be32(lstatus);
1987ec21e2ecSJeff Kirsher 
1988d55398baSClaudiu Manoil 	gfar_wmb(); /* force lstatus write before tx_skbuff */
1989ec21e2ecSJeff Kirsher 
1990ec21e2ecSJeff Kirsher 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1991ec21e2ecSJeff Kirsher 
1992ec21e2ecSJeff Kirsher 	/* Update the current skb pointer to the next entry we will use
19930977f817SJan Ceuleers 	 * (wrapping if necessary)
19940977f817SJan Ceuleers 	 */
1995ec21e2ecSJeff Kirsher 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1996ec21e2ecSJeff Kirsher 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1997ec21e2ecSJeff Kirsher 
1998ec21e2ecSJeff Kirsher 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1999ec21e2ecSJeff Kirsher 
2000bc602280SClaudiu Manoil 	/* We can work in parallel with gfar_clean_tx_ring(), except
2001bc602280SClaudiu Manoil 	 * when modifying num_txbdfree. Note that we didn't grab the lock
2002bc602280SClaudiu Manoil 	 * when we were reading the num_txbdfree and checking for available
2003bc602280SClaudiu Manoil 	 * space, that's because outside of this function it can only grow.
2004bc602280SClaudiu Manoil 	 */
2005bc602280SClaudiu Manoil 	spin_lock_bh(&tx_queue->txlock);
2006ec21e2ecSJeff Kirsher 	/* reduce TxBD free count */
2007ec21e2ecSJeff Kirsher 	tx_queue->num_txbdfree -= (nr_txbds);
2008bc602280SClaudiu Manoil 	spin_unlock_bh(&tx_queue->txlock);
2009ec21e2ecSJeff Kirsher 
2010ec21e2ecSJeff Kirsher 	/* If the next BD still needs to be cleaned up, then the bds
20110977f817SJan Ceuleers 	 * are full.  We need to tell the kernel to stop sending us stuff.
20120977f817SJan Ceuleers 	 */
2013ec21e2ecSJeff Kirsher 	if (!tx_queue->num_txbdfree) {
2014ec21e2ecSJeff Kirsher 		netif_tx_stop_queue(txq);
2015ec21e2ecSJeff Kirsher 
2016ec21e2ecSJeff Kirsher 		dev->stats.tx_fifo_errors++;
2017ec21e2ecSJeff Kirsher 	}
2018ec21e2ecSJeff Kirsher 
2019ec21e2ecSJeff Kirsher 	/* Tell the DMA to go go go */
2020ec21e2ecSJeff Kirsher 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2021ec21e2ecSJeff Kirsher 
2022ec21e2ecSJeff Kirsher 	return NETDEV_TX_OK;
20230a4b5a24SKevin Hao 
20240a4b5a24SKevin Hao dma_map_err:
20250a4b5a24SKevin Hao 	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
20260a4b5a24SKevin Hao 	if (do_tstamp)
20270a4b5a24SKevin Hao 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
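	/* unwind the fragment mappings set up before the failure */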
20280a4b5a24SKevin Hao 	for (i = 0; i < nr_frags; i++) {
2029a7312d58SClaudiu Manoil 		lstatus = be32_to_cpu(txbdp->lstatus);
20300a4b5a24SKevin Hao 		if (!(lstatus & BD_LFLAG(TXBD_READY)))
20310a4b5a24SKevin Hao 			break;
20320a4b5a24SKevin Hao 
2033a7312d58SClaudiu Manoil 		lstatus &= ~BD_LFLAG(TXBD_READY);
2034a7312d58SClaudiu Manoil 		txbdp->lstatus = cpu_to_be32(lstatus);
2035a7312d58SClaudiu Manoil 		bufaddr = be32_to_cpu(txbdp->bufPtr);
2036a7312d58SClaudiu Manoil 		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
20370a4b5a24SKevin Hao 			       DMA_TO_DEVICE);
20380a4b5a24SKevin Hao 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
20390a4b5a24SKevin Hao 	}
20400a4b5a24SKevin Hao 	gfar_wmb();
20410a4b5a24SKevin Hao 	dev_kfree_skb_any(skb);
20420a4b5a24SKevin Hao 	return NETDEV_TX_OK;
2043ec21e2ecSJeff Kirsher }
2044ec21e2ecSJeff Kirsher 
2045ec21e2ecSJeff Kirsher /* Changes the mac address if the controller is not running. */
2046ec21e2ecSJeff Kirsher static int gfar_set_mac_address(struct net_device *dev)
2047ec21e2ecSJeff Kirsher {
2048ec21e2ecSJeff Kirsher 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2049ec21e2ecSJeff Kirsher 
2050ec21e2ecSJeff Kirsher 	return 0;
2051ec21e2ecSJeff Kirsher }
2052ec21e2ecSJeff Kirsher 
2053ec21e2ecSJeff Kirsher static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2054ec21e2ecSJeff Kirsher {
2055ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2056ec21e2ecSJeff Kirsher 
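	/* serialize MTU changes with any other reset/reconfiguration in progress */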
20570851133bSClaudiu Manoil 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20580851133bSClaudiu Manoil 		cpu_relax();
20590851133bSClaudiu Manoil 
206088302648SClaudiu Manoil 	if (dev->flags & IFF_UP)
2061ec21e2ecSJeff Kirsher 		stop_gfar(dev);
2062ec21e2ecSJeff Kirsher 
2063ec21e2ecSJeff Kirsher 	dev->mtu = new_mtu;
2064ec21e2ecSJeff Kirsher 
206588302648SClaudiu Manoil 	if (dev->flags & IFF_UP)
2066ec21e2ecSJeff Kirsher 		startup_gfar(dev);
2067ec21e2ecSJeff Kirsher 
20680851133bSClaudiu Manoil 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
20690851133bSClaudiu Manoil 
2070ec21e2ecSJeff Kirsher 	return 0;
2071ec21e2ecSJeff Kirsher }
2072ec21e2ecSJeff Kirsher 
20739f5c44cfSYueHaibing static void reset_gfar(struct net_device *ndev)
20740851133bSClaudiu Manoil {
20750851133bSClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
20760851133bSClaudiu Manoil 
20770851133bSClaudiu Manoil 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20780851133bSClaudiu Manoil 		cpu_relax();
20790851133bSClaudiu Manoil 
20800851133bSClaudiu Manoil 	stop_gfar(ndev);
20810851133bSClaudiu Manoil 	startup_gfar(ndev);
20820851133bSClaudiu Manoil 
20830851133bSClaudiu Manoil 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
20840851133bSClaudiu Manoil }
20850851133bSClaudiu Manoil 
2086ec21e2ecSJeff Kirsher /* gfar_reset_task gets scheduled when a packet has not been
2087ec21e2ecSJeff Kirsher  * transmitted after a set amount of time.
2088ec21e2ecSJeff Kirsher  * For now, assume that clearing out all the structures and
2089ec21e2ecSJeff Kirsher  * starting over will fix the problem.
2090ec21e2ecSJeff Kirsher  */
2091ec21e2ecSJeff Kirsher static void gfar_reset_task(struct work_struct *work)
2092ec21e2ecSJeff Kirsher {
2093ec21e2ecSJeff Kirsher 	struct gfar_private *priv = container_of(work, struct gfar_private,
2094ec21e2ecSJeff Kirsher 						 reset_task);
20950851133bSClaudiu Manoil 	reset_gfar(priv->ndev);
2096ec21e2ecSJeff Kirsher }
2097ec21e2ecSJeff Kirsher 
20980290bd29SMichael S. Tsirkin static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2099ec21e2ecSJeff Kirsher {
2100ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2101ec21e2ecSJeff Kirsher 
2102ec21e2ecSJeff Kirsher 	dev->stats.tx_errors++;
2103ec21e2ecSJeff Kirsher 	schedule_work(&priv->reset_task);
2104ec21e2ecSJeff Kirsher }
2105ec21e2ecSJeff Kirsher 
21067d993c5fSArseny Solokha static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
21077d993c5fSArseny Solokha {
21087d993c5fSArseny Solokha 	struct hwtstamp_config config;
21097d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(netdev);
21107d993c5fSArseny Solokha 
21117d993c5fSArseny Solokha 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
21127d993c5fSArseny Solokha 		return -EFAULT;
21137d993c5fSArseny Solokha 
21147d993c5fSArseny Solokha 	/* reserved for future extensions */
21157d993c5fSArseny Solokha 	if (config.flags)
21167d993c5fSArseny Solokha 		return -EINVAL;
21177d993c5fSArseny Solokha 
21187d993c5fSArseny Solokha 	switch (config.tx_type) {
21197d993c5fSArseny Solokha 	case HWTSTAMP_TX_OFF:
21207d993c5fSArseny Solokha 		priv->hwts_tx_en = 0;
21217d993c5fSArseny Solokha 		break;
21227d993c5fSArseny Solokha 	case HWTSTAMP_TX_ON:
21237d993c5fSArseny Solokha 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
21247d993c5fSArseny Solokha 			return -ERANGE;
21257d993c5fSArseny Solokha 		priv->hwts_tx_en = 1;
21267d993c5fSArseny Solokha 		break;
21277d993c5fSArseny Solokha 	default:
21287d993c5fSArseny Solokha 		return -ERANGE;
21297d993c5fSArseny Solokha 	}
21307d993c5fSArseny Solokha 
21317d993c5fSArseny Solokha 	switch (config.rx_filter) {
21327d993c5fSArseny Solokha 	case HWTSTAMP_FILTER_NONE:
21337d993c5fSArseny Solokha 		if (priv->hwts_rx_en) {
21347d993c5fSArseny Solokha 			priv->hwts_rx_en = 0;
21357d993c5fSArseny Solokha 			reset_gfar(netdev);
21367d993c5fSArseny Solokha 		}
21377d993c5fSArseny Solokha 		break;
21387d993c5fSArseny Solokha 	default:
21397d993c5fSArseny Solokha 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
21407d993c5fSArseny Solokha 			return -ERANGE;
21417d993c5fSArseny Solokha 		if (!priv->hwts_rx_en) {
21427d993c5fSArseny Solokha 			priv->hwts_rx_en = 1;
21437d993c5fSArseny Solokha 			reset_gfar(netdev);
21447d993c5fSArseny Solokha 		}
21457d993c5fSArseny Solokha 		config.rx_filter = HWTSTAMP_FILTER_ALL;
21467d993c5fSArseny Solokha 		break;
21477d993c5fSArseny Solokha 	}
21487d993c5fSArseny Solokha 
21497d993c5fSArseny Solokha 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
21507d993c5fSArseny Solokha 		-EFAULT : 0;
21517d993c5fSArseny Solokha }
21527d993c5fSArseny Solokha 
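/* Handle SIOCGHWTSTAMP: report the current hardware timestamping
 * configuration to user space.
 */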
21537d993c5fSArseny Solokha static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
21547d993c5fSArseny Solokha {
21557d993c5fSArseny Solokha 	struct hwtstamp_config config;
21567d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(netdev);
21577d993c5fSArseny Solokha 
21587d993c5fSArseny Solokha 	config.flags = 0;
21597d993c5fSArseny Solokha 	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
21607d993c5fSArseny Solokha 	config.rx_filter = (priv->hwts_rx_en ?
21617d993c5fSArseny Solokha 			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
21627d993c5fSArseny Solokha 
21637d993c5fSArseny Solokha 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
21647d993c5fSArseny Solokha 		-EFAULT : 0;
21657d993c5fSArseny Solokha }
21667d993c5fSArseny Solokha 
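/* Handle device ioctls: hardware timestamping requests are serviced
 * here, everything else is forwarded to the PHY.
 */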
21677d993c5fSArseny Solokha static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
21687d993c5fSArseny Solokha {
21697d993c5fSArseny Solokha 	struct phy_device *phydev = dev->phydev;
21707d993c5fSArseny Solokha 
21717d993c5fSArseny Solokha 	if (!netif_running(dev))
21727d993c5fSArseny Solokha 		return -EINVAL;
21737d993c5fSArseny Solokha 
21747d993c5fSArseny Solokha 	if (cmd == SIOCSHWTSTAMP)
21757d993c5fSArseny Solokha 		return gfar_hwtstamp_set(dev, rq);
21767d993c5fSArseny Solokha 	if (cmd == SIOCGHWTSTAMP)
21777d993c5fSArseny Solokha 		return gfar_hwtstamp_get(dev, rq);
21787d993c5fSArseny Solokha 
21797d993c5fSArseny Solokha 	if (!phydev)
21807d993c5fSArseny Solokha 		return -ENODEV;
21817d993c5fSArseny Solokha 
21827d993c5fSArseny Solokha 	return phy_mii_ioctl(phydev, rq, cmd);
21837d993c5fSArseny Solokha }
21847d993c5fSArseny Solokha 
2185ec21e2ecSJeff Kirsher /* Clean up completed Tx descriptors; called from the Tx NAPI poll routines */
2186c233cf40SClaudiu Manoil static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2187ec21e2ecSJeff Kirsher {
2188ec21e2ecSJeff Kirsher 	struct net_device *dev = tx_queue->dev;
2189d8a0f1b0SPaul Gortmaker 	struct netdev_queue *txq;
2190ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
2191ec21e2ecSJeff Kirsher 	struct txbd8 *bdp, *next = NULL;
2192ec21e2ecSJeff Kirsher 	struct txbd8 *lbdp = NULL;
2193ec21e2ecSJeff Kirsher 	struct txbd8 *base = tx_queue->tx_bd_base;
2194ec21e2ecSJeff Kirsher 	struct sk_buff *skb;
2195ec21e2ecSJeff Kirsher 	int skb_dirtytx;
2196ec21e2ecSJeff Kirsher 	int tx_ring_size = tx_queue->tx_ring_size;
2197ec21e2ecSJeff Kirsher 	int frags = 0, nr_txbds = 0;
2198ec21e2ecSJeff Kirsher 	int i;
2199ec21e2ecSJeff Kirsher 	int howmany = 0;
2200d8a0f1b0SPaul Gortmaker 	int tqi = tx_queue->qindex;
2201d8a0f1b0SPaul Gortmaker 	unsigned int bytes_sent = 0;
2202ec21e2ecSJeff Kirsher 	u32 lstatus;
2203ec21e2ecSJeff Kirsher 	size_t buflen;
2204ec21e2ecSJeff Kirsher 
2205d8a0f1b0SPaul Gortmaker 	txq = netdev_get_tx_queue(dev, tqi);
2206ec21e2ecSJeff Kirsher 	bdp = tx_queue->dirty_tx;
2207ec21e2ecSJeff Kirsher 	skb_dirtytx = tx_queue->skb_dirtytx;
2208ec21e2ecSJeff Kirsher 
2209ec21e2ecSJeff Kirsher 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2210c26a2c2dSVladimir Oltean 		bool do_tstamp;
2211c26a2c2dSVladimir Oltean 
2212c26a2c2dSVladimir Oltean 		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2213c26a2c2dSVladimir Oltean 			    priv->hwts_tx_en;
2214ec21e2ecSJeff Kirsher 
2215ec21e2ecSJeff Kirsher 		frags = skb_shinfo(skb)->nr_frags;
2216ec21e2ecSJeff Kirsher 
22170977f817SJan Ceuleers 		/* When time stamping, one additional TxBD must be freed.
2218ec21e2ecSJeff Kirsher 		 * Also, we need to dma_unmap_single() the TxPAL.
2219ec21e2ecSJeff Kirsher 		 */
2220c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp))
2221ec21e2ecSJeff Kirsher 			nr_txbds = frags + 2;
2222ec21e2ecSJeff Kirsher 		else
2223ec21e2ecSJeff Kirsher 			nr_txbds = frags + 1;
2224ec21e2ecSJeff Kirsher 
2225ec21e2ecSJeff Kirsher 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2226ec21e2ecSJeff Kirsher 
2227a7312d58SClaudiu Manoil 		lstatus = be32_to_cpu(lbdp->lstatus);
2228ec21e2ecSJeff Kirsher 
2229ec21e2ecSJeff Kirsher 		/* Only clean completed frames */
2230ec21e2ecSJeff Kirsher 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2231ec21e2ecSJeff Kirsher 		    (lstatus & BD_LENGTH_MASK))
2232ec21e2ecSJeff Kirsher 			break;
2233ec21e2ecSJeff Kirsher 
2234c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp)) {
2235ec21e2ecSJeff Kirsher 			next = next_txbd(bdp, base, tx_ring_size);
2236a7312d58SClaudiu Manoil 			buflen = be16_to_cpu(next->length) +
2237a7312d58SClaudiu Manoil 				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2238ec21e2ecSJeff Kirsher 		} else
2239a7312d58SClaudiu Manoil 			buflen = be16_to_cpu(bdp->length);
2240ec21e2ecSJeff Kirsher 
2241a7312d58SClaudiu Manoil 		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2242ec21e2ecSJeff Kirsher 				 buflen, DMA_TO_DEVICE);
2243ec21e2ecSJeff Kirsher 
2244c26a2c2dSVladimir Oltean 		if (unlikely(do_tstamp)) {
2245ec21e2ecSJeff Kirsher 			struct skb_shared_hwtstamps shhwtstamps;
2246b4b67f26SScott Wood 			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2247b4b67f26SScott Wood 					  ~0x7UL);
2248bc4598bcSJan Ceuleers 
2249ec21e2ecSJeff Kirsher 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2250f54af12fSYangbo Lu 			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
22519c4886e5SManfred Rudigier 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2252ec21e2ecSJeff Kirsher 			skb_tstamp_tx(skb, &shhwtstamps);
2253a7312d58SClaudiu Manoil 			gfar_clear_txbd_status(bdp);
2254ec21e2ecSJeff Kirsher 			bdp = next;
2255ec21e2ecSJeff Kirsher 		}
2256ec21e2ecSJeff Kirsher 
2257a7312d58SClaudiu Manoil 		gfar_clear_txbd_status(bdp);
2258ec21e2ecSJeff Kirsher 		bdp = next_txbd(bdp, base, tx_ring_size);
2259ec21e2ecSJeff Kirsher 
2260ec21e2ecSJeff Kirsher 		for (i = 0; i < frags; i++) {
2261a7312d58SClaudiu Manoil 			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2262a7312d58SClaudiu Manoil 				       be16_to_cpu(bdp->length),
2263a7312d58SClaudiu Manoil 				       DMA_TO_DEVICE);
2264a7312d58SClaudiu Manoil 			gfar_clear_txbd_status(bdp);
2265ec21e2ecSJeff Kirsher 			bdp = next_txbd(bdp, base, tx_ring_size);
2266ec21e2ecSJeff Kirsher 		}
2267ec21e2ecSJeff Kirsher 
226850ad076bSClaudiu Manoil 		bytes_sent += GFAR_CB(skb)->bytes_sent;
2269d8a0f1b0SPaul Gortmaker 
2270ec21e2ecSJeff Kirsher 		dev_kfree_skb_any(skb);
2271ec21e2ecSJeff Kirsher 
2272ec21e2ecSJeff Kirsher 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2273ec21e2ecSJeff Kirsher 
2274ec21e2ecSJeff Kirsher 		skb_dirtytx = (skb_dirtytx + 1) &
2275ec21e2ecSJeff Kirsher 			      TX_RING_MOD_MASK(tx_ring_size);
2276ec21e2ecSJeff Kirsher 
2277ec21e2ecSJeff Kirsher 		howmany++;
2278bc602280SClaudiu Manoil 		spin_lock(&tx_queue->txlock);
2279ec21e2ecSJeff Kirsher 		tx_queue->num_txbdfree += nr_txbds;
2280bc602280SClaudiu Manoil 		spin_unlock(&tx_queue->txlock);
2281ec21e2ecSJeff Kirsher 	}
2282ec21e2ecSJeff Kirsher 
2283ec21e2ecSJeff Kirsher 	/* If we freed a buffer, we can restart transmission, if necessary */
22840851133bSClaudiu Manoil 	if (tx_queue->num_txbdfree &&
22850851133bSClaudiu Manoil 	    netif_tx_queue_stopped(txq) &&
22860851133bSClaudiu Manoil 	    !(test_bit(GFAR_DOWN, &priv->state)))
22870851133bSClaudiu Manoil 		netif_wake_subqueue(priv->ndev, tqi);
2288ec21e2ecSJeff Kirsher 
2289ec21e2ecSJeff Kirsher 	/* Update dirty indicators */
2290ec21e2ecSJeff Kirsher 	tx_queue->skb_dirtytx = skb_dirtytx;
2291ec21e2ecSJeff Kirsher 	tx_queue->dirty_tx = bdp;
2292ec21e2ecSJeff Kirsher 
2293d8a0f1b0SPaul Gortmaker 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2294ec21e2ecSJeff Kirsher }
2295ec21e2ecSJeff Kirsher 
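/* Update the error counters from the status flags of a faulty RxBD */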
2296f23223f1SClaudiu Manoil static void count_errors(u32 lstatus, struct net_device *ndev)
2297ec21e2ecSJeff Kirsher {
2298f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
2299f23223f1SClaudiu Manoil 	struct net_device_stats *stats = &ndev->stats;
2300ec21e2ecSJeff Kirsher 	struct gfar_extra_stats *estats = &priv->extra_stats;
2301ec21e2ecSJeff Kirsher 
23020977f817SJan Ceuleers 	/* If the packet was truncated, none of the other errors matter */
2303f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2304ec21e2ecSJeff Kirsher 		stats->rx_length_errors++;
2305ec21e2ecSJeff Kirsher 
2306212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_trunc);
2307ec21e2ecSJeff Kirsher 
2308ec21e2ecSJeff Kirsher 		return;
2309ec21e2ecSJeff Kirsher 	}
2310ec21e2ecSJeff Kirsher 	/* Count the errors, if there were any */
2311f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2312ec21e2ecSJeff Kirsher 		stats->rx_length_errors++;
2313ec21e2ecSJeff Kirsher 
2314f966082eSClaudiu Manoil 		if (lstatus & BD_LFLAG(RXBD_LARGE))
2315212079dfSPaul Gortmaker 			atomic64_inc(&estats->rx_large);
2316ec21e2ecSJeff Kirsher 		else
2317212079dfSPaul Gortmaker 			atomic64_inc(&estats->rx_short);
2318ec21e2ecSJeff Kirsher 	}
2319f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2320ec21e2ecSJeff Kirsher 		stats->rx_frame_errors++;
2321212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_nonoctet);
2322ec21e2ecSJeff Kirsher 	}
2323f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2324212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_crcerr);
2325ec21e2ecSJeff Kirsher 		stats->rx_crc_errors++;
2326ec21e2ecSJeff Kirsher 	}
2327f966082eSClaudiu Manoil 	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2328212079dfSPaul Gortmaker 		atomic64_inc(&estats->rx_overrun);
2329f966082eSClaudiu Manoil 		stats->rx_over_errors++;
2330ec21e2ecSJeff Kirsher 	}
2331ec21e2ecSJeff Kirsher }
2332ec21e2ecSJeff Kirsher 
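/* Rx interrupt handler: acknowledge FGPI events, otherwise mask further
 * Rx interrupts and schedule the Rx NAPI context.
 */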
23337ad38784SArseny Solokha static irqreturn_t gfar_receive(int irq, void *grp_id)
2334ec21e2ecSJeff Kirsher {
2335aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2336aeb12c5eSClaudiu Manoil 	unsigned long flags;
23373e905b80SClaudiu Manoil 	u32 imask, ievent;
23383e905b80SClaudiu Manoil 
23393e905b80SClaudiu Manoil 	ievent = gfar_read(&grp->regs->ievent);
23403e905b80SClaudiu Manoil 
23413e905b80SClaudiu Manoil 	if (unlikely(ievent & IEVENT_FGPI)) {
23423e905b80SClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
23433e905b80SClaudiu Manoil 		return IRQ_HANDLED;
23443e905b80SClaudiu Manoil 	}
2345aeb12c5eSClaudiu Manoil 
2346aeb12c5eSClaudiu Manoil 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2347aeb12c5eSClaudiu Manoil 		spin_lock_irqsave(&grp->grplock, flags);
2348aeb12c5eSClaudiu Manoil 		imask = gfar_read(&grp->regs->imask);
2349aeb12c5eSClaudiu Manoil 		imask &= IMASK_RX_DISABLED;
2350aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->imask, imask);
2351aeb12c5eSClaudiu Manoil 		spin_unlock_irqrestore(&grp->grplock, flags);
2352aeb12c5eSClaudiu Manoil 		__napi_schedule(&grp->napi_rx);
2353aeb12c5eSClaudiu Manoil 	} else {
2354aeb12c5eSClaudiu Manoil 		/* Clear IEVENT, so interrupts aren't called again
2355aeb12c5eSClaudiu Manoil 		 * because of the packets that have already arrived.
2356aeb12c5eSClaudiu Manoil 		 */
2357aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2358aeb12c5eSClaudiu Manoil 	}
2359aeb12c5eSClaudiu Manoil 
2360aeb12c5eSClaudiu Manoil 	return IRQ_HANDLED;
2361aeb12c5eSClaudiu Manoil }
2362aeb12c5eSClaudiu Manoil 
2363aeb12c5eSClaudiu Manoil /* Interrupt Handler for Transmit complete */
2364aeb12c5eSClaudiu Manoil static irqreturn_t gfar_transmit(int irq, void *grp_id)
2365aeb12c5eSClaudiu Manoil {
2366aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2367aeb12c5eSClaudiu Manoil 	unsigned long flags;
2368aeb12c5eSClaudiu Manoil 	u32 imask;
2369aeb12c5eSClaudiu Manoil 
2370aeb12c5eSClaudiu Manoil 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2371aeb12c5eSClaudiu Manoil 		spin_lock_irqsave(&grp->grplock, flags);
2372aeb12c5eSClaudiu Manoil 		imask = gfar_read(&grp->regs->imask);
2373aeb12c5eSClaudiu Manoil 		imask &= IMASK_TX_DISABLED;
2374aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->imask, imask);
2375aeb12c5eSClaudiu Manoil 		spin_unlock_irqrestore(&grp->grplock, flags);
2376aeb12c5eSClaudiu Manoil 		__napi_schedule(&grp->napi_tx);
2377aeb12c5eSClaudiu Manoil 	} else {
2378aeb12c5eSClaudiu Manoil 		/* Clear IEVENT, so interrupts aren't called again
2379aeb12c5eSClaudiu Manoil 		 * because of the packets that have already arrived.
2380aeb12c5eSClaudiu Manoil 		 */
2381aeb12c5eSClaudiu Manoil 		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2382aeb12c5eSClaudiu Manoil 	}
2383aeb12c5eSClaudiu Manoil 
2384ec21e2ecSJeff Kirsher 	return IRQ_HANDLED;
2385ec21e2ecSJeff Kirsher }
2386ec21e2ecSJeff Kirsher 
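/* Add the Rx buffer to the skb: as linear data for the first buffer of
 * a frame, as a page fragment for subsequent ones.  Returns true if the
 * half-page can be recycled for another buffer.
 */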
238775354148SClaudiu Manoil static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
238875354148SClaudiu Manoil 			     struct sk_buff *skb, bool first)
238975354148SClaudiu Manoil {
2390202a0a70SAndy Spencer 	int size = lstatus & BD_LENGTH_MASK;
239175354148SClaudiu Manoil 	struct page *page = rxb->page;
239275354148SClaudiu Manoil 
23936c389fc9SZefir Kurtisi 	if (likely(first)) {
239475354148SClaudiu Manoil 		skb_put(skb, size);
23956c389fc9SZefir Kurtisi 	} else {
23966c389fc9SZefir Kurtisi 		/* the last fragment's length contains the full frame length */
2397d903ec77SAndy Spencer 		if (lstatus & BD_LFLAG(RXBD_LAST))
23986c389fc9SZefir Kurtisi 			size -= skb->len;
23996c389fc9SZefir Kurtisi 
240075354148SClaudiu Manoil 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
240175354148SClaudiu Manoil 				rxb->page_offset + RXBUF_ALIGNMENT,
240275354148SClaudiu Manoil 				size, GFAR_RXB_TRUESIZE);
24036c389fc9SZefir Kurtisi 	}
240475354148SClaudiu Manoil 
240575354148SClaudiu Manoil 	/* try to reuse the page */
240669fed99bSEric Dumazet 	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
240775354148SClaudiu Manoil 		return false;
240875354148SClaudiu Manoil 
240975354148SClaudiu Manoil 	/* change offset to the other half */
241075354148SClaudiu Manoil 	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
241175354148SClaudiu Manoil 
2412fe896d18SJoonsoo Kim 	page_ref_inc(page);
241375354148SClaudiu Manoil 
241475354148SClaudiu Manoil 	return true;
241575354148SClaudiu Manoil }
241675354148SClaudiu Manoil 
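/* Recycle a received buffer: copy its page reference into the next slot
 * to be allocated and sync the half-page back to the device.
 */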
241775354148SClaudiu Manoil static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
241875354148SClaudiu Manoil 			       struct gfar_rx_buff *old_rxb)
241975354148SClaudiu Manoil {
242075354148SClaudiu Manoil 	struct gfar_rx_buff *new_rxb;
242175354148SClaudiu Manoil 	u16 nta = rxq->next_to_alloc;
242275354148SClaudiu Manoil 
242375354148SClaudiu Manoil 	new_rxb = &rxq->rx_buff[nta];
242475354148SClaudiu Manoil 
242575354148SClaudiu Manoil 	/* find next buf that can reuse a page */
242675354148SClaudiu Manoil 	nta++;
242775354148SClaudiu Manoil 	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
242875354148SClaudiu Manoil 
242975354148SClaudiu Manoil 	/* copy page reference */
243075354148SClaudiu Manoil 	*new_rxb = *old_rxb;
243175354148SClaudiu Manoil 
243275354148SClaudiu Manoil 	/* sync for use by the device */
243375354148SClaudiu Manoil 	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
243475354148SClaudiu Manoil 					 old_rxb->page_offset,
243575354148SClaudiu Manoil 					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
243675354148SClaudiu Manoil }
243775354148SClaudiu Manoil 
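/* Consume the next-to-clean Rx buffer: build a new skb for the first
 * buffer of a frame, or append the buffer to the frame's skb, then
 * recycle or unmap the backing page.
 */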
243875354148SClaudiu Manoil static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
243975354148SClaudiu Manoil 					    u32 lstatus, struct sk_buff *skb)
244075354148SClaudiu Manoil {
244175354148SClaudiu Manoil 	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
244275354148SClaudiu Manoil 	struct page *page = rxb->page;
244375354148SClaudiu Manoil 	bool first = false;
244475354148SClaudiu Manoil 
244575354148SClaudiu Manoil 	if (likely(!skb)) {
244675354148SClaudiu Manoil 		void *buff_addr = page_address(page) + rxb->page_offset;
244775354148SClaudiu Manoil 
244875354148SClaudiu Manoil 		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
244975354148SClaudiu Manoil 		if (unlikely(!skb)) {
245075354148SClaudiu Manoil 			gfar_rx_alloc_err(rx_queue);
245175354148SClaudiu Manoil 			return NULL;
245275354148SClaudiu Manoil 		}
245375354148SClaudiu Manoil 		skb_reserve(skb, RXBUF_ALIGNMENT);
245475354148SClaudiu Manoil 		first = true;
245575354148SClaudiu Manoil 	}
245675354148SClaudiu Manoil 
245775354148SClaudiu Manoil 	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
245875354148SClaudiu Manoil 				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
245975354148SClaudiu Manoil 
246075354148SClaudiu Manoil 	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
246175354148SClaudiu Manoil 		/* reuse the free half of the page */
246275354148SClaudiu Manoil 		gfar_reuse_rx_page(rx_queue, rxb);
246375354148SClaudiu Manoil 	} else {
246475354148SClaudiu Manoil 		/* page cannot be reused, unmap it */
246575354148SClaudiu Manoil 		dma_unmap_page(rx_queue->dev, rxb->dma,
246675354148SClaudiu Manoil 			       PAGE_SIZE, DMA_FROM_DEVICE);
246775354148SClaudiu Manoil 	}
246875354148SClaudiu Manoil 
246975354148SClaudiu Manoil 	/* clear rxb content */
247075354148SClaudiu Manoil 	rxb->page = NULL;
247175354148SClaudiu Manoil 
247275354148SClaudiu Manoil 	return skb;
247375354148SClaudiu Manoil }
247475354148SClaudiu Manoil 
2475ec21e2ecSJeff Kirsher static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2476ec21e2ecSJeff Kirsher {
2477ec21e2ecSJeff Kirsher 	/* If valid headers were found, and valid sums
2478ec21e2ecSJeff Kirsher 	 * were verified, then we tell the kernel that no
24790977f817SJan Ceuleers 	 * checksumming is necessary.  Otherwise, the stack must verify the checksum.
24800977f817SJan Ceuleers 	 */
248126eb9374SClaudiu Manoil 	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
248226eb9374SClaudiu Manoil 	    (RXFCB_CIP | RXFCB_CTU))
2483ec21e2ecSJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2484ec21e2ecSJeff Kirsher 	else
2485ec21e2ecSJeff Kirsher 		skb_checksum_none_assert(skb);
2486ec21e2ecSJeff Kirsher }
2487ec21e2ecSJeff Kirsher 
24880977f817SJan Ceuleers /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2489f23223f1SClaudiu Manoil static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2490ec21e2ecSJeff Kirsher {
2491f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
2492ec21e2ecSJeff Kirsher 	struct rxfcb *fcb = NULL;
2493ec21e2ecSJeff Kirsher 
2494ec21e2ecSJeff Kirsher 	/* fcb is at the beginning if it exists */
2495ec21e2ecSJeff Kirsher 	fcb = (struct rxfcb *)skb->data;
2496ec21e2ecSJeff Kirsher 
24970977f817SJan Ceuleers 	/* Remove the FCB from the skb
24980977f817SJan Ceuleers 	 * Remove the padded bytes, if there are any
24990977f817SJan Ceuleers 	 */
2500f23223f1SClaudiu Manoil 	if (priv->uses_rxfcb)
250176f31e8bSClaudiu Manoil 		skb_pull(skb, GMAC_FCB_LEN);
2502ec21e2ecSJeff Kirsher 
2503ec21e2ecSJeff Kirsher 	/* Get receive timestamp from the skb */
2504ec21e2ecSJeff Kirsher 	if (priv->hwts_rx_en) {
2505ec21e2ecSJeff Kirsher 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2506ec21e2ecSJeff Kirsher 		u64 *ns = (u64 *) skb->data;
2507bc4598bcSJan Ceuleers 
2508ec21e2ecSJeff Kirsher 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2509f54af12fSYangbo Lu 		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2510ec21e2ecSJeff Kirsher 	}
2511ec21e2ecSJeff Kirsher 
2512ec21e2ecSJeff Kirsher 	if (priv->padding)
2513ec21e2ecSJeff Kirsher 		skb_pull(skb, priv->padding);
2514ec21e2ecSJeff Kirsher 
2515d903ec77SAndy Spencer 	/* Trim off the FCS */
2516d903ec77SAndy Spencer 	pskb_trim(skb, skb->len - ETH_FCS_LEN);
2517d903ec77SAndy Spencer 
2518f23223f1SClaudiu Manoil 	if (ndev->features & NETIF_F_RXCSUM)
2519ec21e2ecSJeff Kirsher 		gfar_rx_checksum(skb, fcb);
2520ec21e2ecSJeff Kirsher 
2521f646968fSPatrick McHardy 	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2522823dcd25SDavid S. Miller 	 * Even if vlan rx accel is disabled, on some chips
2523823dcd25SDavid S. Miller 	 * RXFCB_VLN is pseudo-randomly set.
2524823dcd25SDavid S. Miller 	 */
2525f23223f1SClaudiu Manoil 	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
252626eb9374SClaudiu Manoil 	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
252726eb9374SClaudiu Manoil 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
252826eb9374SClaudiu Manoil 				       be16_to_cpu(fcb->vlctl));
2529ec21e2ecSJeff Kirsher }
2530ec21e2ecSJeff Kirsher 
2531ec21e2ecSJeff Kirsher /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2532ec21e2ecSJeff Kirsher  * until the budget/quota has been reached. Returns the number
2533ec21e2ecSJeff Kirsher  * of frames handled
2534ec21e2ecSJeff Kirsher  */
25357ad38784SArseny Solokha static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
25367ad38784SArseny Solokha 			      int rx_work_limit)
2537ec21e2ecSJeff Kirsher {
2538f23223f1SClaudiu Manoil 	struct net_device *ndev = rx_queue->ndev;
2539f23223f1SClaudiu Manoil 	struct gfar_private *priv = netdev_priv(ndev);
254075354148SClaudiu Manoil 	struct rxbd8 *bdp;
254175354148SClaudiu Manoil 	int i, howmany = 0;
254275354148SClaudiu Manoil 	struct sk_buff *skb = rx_queue->skb;
254375354148SClaudiu Manoil 	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
254475354148SClaudiu Manoil 	unsigned int total_bytes = 0, total_pkts = 0;
2545ec21e2ecSJeff Kirsher 
2546ec21e2ecSJeff Kirsher 	/* Get the first full descriptor */
254776f31e8bSClaudiu Manoil 	i = rx_queue->next_to_clean;
2548ec21e2ecSJeff Kirsher 
254976f31e8bSClaudiu Manoil 	while (rx_work_limit--) {
2550f966082eSClaudiu Manoil 		u32 lstatus;
2551ec21e2ecSJeff Kirsher 
255276f31e8bSClaudiu Manoil 		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
255376f31e8bSClaudiu Manoil 			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
255476f31e8bSClaudiu Manoil 			cleaned_cnt = 0;
255576f31e8bSClaudiu Manoil 		}
2556bc4598bcSJan Ceuleers 
255776f31e8bSClaudiu Manoil 		bdp = &rx_queue->rx_bd_base[i];
2558f966082eSClaudiu Manoil 		lstatus = be32_to_cpu(bdp->lstatus);
2559f966082eSClaudiu Manoil 		if (lstatus & BD_LFLAG(RXBD_EMPTY))
256076f31e8bSClaudiu Manoil 			break;
256176f31e8bSClaudiu Manoil 
256276f31e8bSClaudiu Manoil 		/* order rx buffer descriptor reads */
2563ec21e2ecSJeff Kirsher 		rmb();
2564ec21e2ecSJeff Kirsher 
256576f31e8bSClaudiu Manoil 		/* fetch next to clean buffer from the ring */
256675354148SClaudiu Manoil 		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
256775354148SClaudiu Manoil 		if (unlikely(!skb))
256875354148SClaudiu Manoil 			break;
2569ec21e2ecSJeff Kirsher 
257075354148SClaudiu Manoil 		cleaned_cnt++;
257175354148SClaudiu Manoil 		howmany++;
2572ec21e2ecSJeff Kirsher 
257375354148SClaudiu Manoil 		if (unlikely(++i == rx_queue->rx_ring_size))
257475354148SClaudiu Manoil 			i = 0;
2575ec21e2ecSJeff Kirsher 
257675354148SClaudiu Manoil 		rx_queue->next_to_clean = i;
257775354148SClaudiu Manoil 
257875354148SClaudiu Manoil 		/* fetch next buffer if not the last in frame */
257975354148SClaudiu Manoil 		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
258075354148SClaudiu Manoil 			continue;
258175354148SClaudiu Manoil 
258275354148SClaudiu Manoil 		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2583f23223f1SClaudiu Manoil 			count_errors(lstatus, ndev);
2584ec21e2ecSJeff Kirsher 
258576f31e8bSClaudiu Manoil 			/* discard faulty buffer */
2586acb600deSEric Dumazet 			dev_kfree_skb(skb);
258775354148SClaudiu Manoil 			skb = NULL;
258875354148SClaudiu Manoil 			rx_queue->stats.rx_dropped++;
258975354148SClaudiu Manoil 			continue;
259075354148SClaudiu Manoil 		}
259176f31e8bSClaudiu Manoil 
2592590399ddSClaudiu Manoil 		gfar_process_frame(ndev, skb);
2593590399ddSClaudiu Manoil 
2594ec21e2ecSJeff Kirsher 		/* Increment the number of packets */
259575354148SClaudiu Manoil 		total_pkts++;
259675354148SClaudiu Manoil 		total_bytes += skb->len;
2597ec21e2ecSJeff Kirsher 
2598ec21e2ecSJeff Kirsher 		skb_record_rx_queue(skb, rx_queue->qindex);
259975354148SClaudiu Manoil 
2600590399ddSClaudiu Manoil 		skb->protocol = eth_type_trans(skb, ndev);
2601f23223f1SClaudiu Manoil 
2602f23223f1SClaudiu Manoil 		/* Send the packet up the stack */
2603f23223f1SClaudiu Manoil 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2604ec21e2ecSJeff Kirsher 
260575354148SClaudiu Manoil 		skb = NULL;
2606ec21e2ecSJeff Kirsher 	}
2607ec21e2ecSJeff Kirsher 
260875354148SClaudiu Manoil 	/* Store incomplete frames for completion */
260975354148SClaudiu Manoil 	rx_queue->skb = skb;
2610ec21e2ecSJeff Kirsher 
261175354148SClaudiu Manoil 	rx_queue->stats.rx_packets += total_pkts;
261275354148SClaudiu Manoil 	rx_queue->stats.rx_bytes += total_bytes;
261376f31e8bSClaudiu Manoil 
261476f31e8bSClaudiu Manoil 	if (cleaned_cnt)
261576f31e8bSClaudiu Manoil 		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
261676f31e8bSClaudiu Manoil 
261776f31e8bSClaudiu Manoil 	/* Update Last Free RxBD pointer for LFC */
261876f31e8bSClaudiu Manoil 	if (unlikely(priv->tx_actual_en)) {
2619b4b67f26SScott Wood 		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2620b4b67f26SScott Wood 
2621b4b67f26SScott Wood 		gfar_write(rx_queue->rfbptr, bdp_dma);
262276f31e8bSClaudiu Manoil 	}
2623ec21e2ecSJeff Kirsher 
2624ec21e2ecSJeff Kirsher 	return howmany;
2625ec21e2ecSJeff Kirsher }
2626ec21e2ecSJeff Kirsher 
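/* Rx NAPI poll for the single-queue-per-group case: clean the Rx ring
 * within the budget and re-enable Rx interrupts once done.
 */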
2627aeb12c5eSClaudiu Manoil static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
26285eaedf31SClaudiu Manoil {
26295eaedf31SClaudiu Manoil 	struct gfar_priv_grp *gfargrp =
2630aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_rx);
26315eaedf31SClaudiu Manoil 	struct gfar __iomem *regs = gfargrp->regs;
263271ff9e3dSClaudiu Manoil 	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
26335eaedf31SClaudiu Manoil 	int work_done = 0;
26345eaedf31SClaudiu Manoil 
26355eaedf31SClaudiu Manoil 	/* Clear IEVENT, so interrupts aren't called again
26365eaedf31SClaudiu Manoil 	 * because of the packets that have already arrived
26375eaedf31SClaudiu Manoil 	 */
2638aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
26395eaedf31SClaudiu Manoil 
26405eaedf31SClaudiu Manoil 	work_done = gfar_clean_rx_ring(rx_queue, budget);
26415eaedf31SClaudiu Manoil 
26425eaedf31SClaudiu Manoil 	if (work_done < budget) {
2643aeb12c5eSClaudiu Manoil 		u32 imask;
26446ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
26455eaedf31SClaudiu Manoil 		/* Clear the halt bit in RSTAT */
26465eaedf31SClaudiu Manoil 		gfar_write(&regs->rstat, gfargrp->rstat);
26475eaedf31SClaudiu Manoil 
2648aeb12c5eSClaudiu Manoil 		spin_lock_irq(&gfargrp->grplock);
2649aeb12c5eSClaudiu Manoil 		imask = gfar_read(&regs->imask);
2650aeb12c5eSClaudiu Manoil 		imask |= IMASK_RX_DEFAULT;
2651aeb12c5eSClaudiu Manoil 		gfar_write(&regs->imask, imask);
2652aeb12c5eSClaudiu Manoil 		spin_unlock_irq(&gfargrp->grplock);
26535eaedf31SClaudiu Manoil 	}
26545eaedf31SClaudiu Manoil 
26555eaedf31SClaudiu Manoil 	return work_done;
26565eaedf31SClaudiu Manoil }
26575eaedf31SClaudiu Manoil 
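/* Tx NAPI poll for the single-queue-per-group case: clean the Tx ring
 * to completion and re-enable Tx interrupts.
 */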
2658aeb12c5eSClaudiu Manoil static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2659ec21e2ecSJeff Kirsher {
2660bc4598bcSJan Ceuleers 	struct gfar_priv_grp *gfargrp =
2661aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_tx);
2662aeb12c5eSClaudiu Manoil 	struct gfar __iomem *regs = gfargrp->regs;
266371ff9e3dSClaudiu Manoil 	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2664aeb12c5eSClaudiu Manoil 	u32 imask;
2665aeb12c5eSClaudiu Manoil 
2666aeb12c5eSClaudiu Manoil 	/* Clear IEVENT, so interrupts aren't called again
2667aeb12c5eSClaudiu Manoil 	 * because of the packets that have already arrived
2668aeb12c5eSClaudiu Manoil 	 */
2669aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2670aeb12c5eSClaudiu Manoil 
2671aeb12c5eSClaudiu Manoil 	/* run Tx cleanup to completion */
2672aeb12c5eSClaudiu Manoil 	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2673aeb12c5eSClaudiu Manoil 		gfar_clean_tx_ring(tx_queue);
2674aeb12c5eSClaudiu Manoil 
2675aeb12c5eSClaudiu Manoil 	napi_complete(napi);
2676aeb12c5eSClaudiu Manoil 
2677aeb12c5eSClaudiu Manoil 	spin_lock_irq(&gfargrp->grplock);
2678aeb12c5eSClaudiu Manoil 	imask = gfar_read(&regs->imask);
2679aeb12c5eSClaudiu Manoil 	imask |= IMASK_TX_DEFAULT;
2680aeb12c5eSClaudiu Manoil 	gfar_write(&regs->imask, imask);
2681aeb12c5eSClaudiu Manoil 	spin_unlock_irq(&gfargrp->grplock);
2682aeb12c5eSClaudiu Manoil 
2683aeb12c5eSClaudiu Manoil 	return 0;
2684aeb12c5eSClaudiu Manoil }
2685aeb12c5eSClaudiu Manoil 
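/* Rx NAPI poll for the multi-queue case: split the budget across the
 * Rx queues flagged active in RSTAT.
 */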
2686aeb12c5eSClaudiu Manoil static int gfar_poll_rx(struct napi_struct *napi, int budget)
2687aeb12c5eSClaudiu Manoil {
2688aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *gfargrp =
2689aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_rx);
2690ec21e2ecSJeff Kirsher 	struct gfar_private *priv = gfargrp->priv;
2691ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = gfargrp->regs;
2692ec21e2ecSJeff Kirsher 	struct gfar_priv_rx_q *rx_queue = NULL;
2693c233cf40SClaudiu Manoil 	int work_done = 0, work_done_per_q = 0;
269439c0a0d5SClaudiu Manoil 	int i, budget_per_q = 0;
26956be5ed3fSClaudiu Manoil 	unsigned long rstat_rxf;
26966be5ed3fSClaudiu Manoil 	int num_act_queues;
2697ec21e2ecSJeff Kirsher 
2698ec21e2ecSJeff Kirsher 	/* Clear IEVENT, so interrupts aren't called again
26990977f817SJan Ceuleers 	 * because of the packets that have already arrived
27000977f817SJan Ceuleers 	 */
2701aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2702ec21e2ecSJeff Kirsher 
27036be5ed3fSClaudiu Manoil 	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
27046be5ed3fSClaudiu Manoil 
27056be5ed3fSClaudiu Manoil 	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
27066be5ed3fSClaudiu Manoil 	if (num_act_queues)
27076be5ed3fSClaudiu Manoil 		budget_per_q = budget/num_act_queues;
27086be5ed3fSClaudiu Manoil 
2709ec21e2ecSJeff Kirsher 	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
27106be5ed3fSClaudiu Manoil 		/* skip queue if not active */
27116be5ed3fSClaudiu Manoil 		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2712ec21e2ecSJeff Kirsher 			continue;
2713ec21e2ecSJeff Kirsher 
2714c233cf40SClaudiu Manoil 		rx_queue = priv->rx_queue[i];
2715c233cf40SClaudiu Manoil 		work_done_per_q =
2716c233cf40SClaudiu Manoil 			gfar_clean_rx_ring(rx_queue, budget_per_q);
2717c233cf40SClaudiu Manoil 		work_done += work_done_per_q;
2718c233cf40SClaudiu Manoil 
2719c233cf40SClaudiu Manoil 		/* finished processing this queue */
2720c233cf40SClaudiu Manoil 		if (work_done_per_q < budget_per_q) {
27216be5ed3fSClaudiu Manoil 			/* clear active queue hw indication */
27226be5ed3fSClaudiu Manoil 			gfar_write(&regs->rstat,
27236be5ed3fSClaudiu Manoil 				   RSTAT_CLEAR_RXF0 >> i);
27246be5ed3fSClaudiu Manoil 			num_act_queues--;
27256be5ed3fSClaudiu Manoil 
27266be5ed3fSClaudiu Manoil 			if (!num_act_queues)
2727c233cf40SClaudiu Manoil 				break;
2728ec21e2ecSJeff Kirsher 		}
2729ec21e2ecSJeff Kirsher 	}
2730ec21e2ecSJeff Kirsher 
2731aeb12c5eSClaudiu Manoil 	if (!num_act_queues) {
2732aeb12c5eSClaudiu Manoil 		u32 imask;
27336ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
2734ec21e2ecSJeff Kirsher 
2735ec21e2ecSJeff Kirsher 		/* Clear the halt bit in RSTAT */
2736ec21e2ecSJeff Kirsher 		gfar_write(&regs->rstat, gfargrp->rstat);
2737ec21e2ecSJeff Kirsher 
2738aeb12c5eSClaudiu Manoil 		spin_lock_irq(&gfargrp->grplock);
2739aeb12c5eSClaudiu Manoil 		imask = gfar_read(&regs->imask);
2740aeb12c5eSClaudiu Manoil 		imask |= IMASK_RX_DEFAULT;
2741aeb12c5eSClaudiu Manoil 		gfar_write(&regs->imask, imask);
2742aeb12c5eSClaudiu Manoil 		spin_unlock_irq(&gfargrp->grplock);
2743ec21e2ecSJeff Kirsher 	}
2744ec21e2ecSJeff Kirsher 
2745c233cf40SClaudiu Manoil 	return work_done;
2746ec21e2ecSJeff Kirsher }
2747ec21e2ecSJeff Kirsher 
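/* Tx NAPI poll for the multi-queue case: clean every Tx ring of the
 * group to completion and re-enable Tx interrupts.
 */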
2748aeb12c5eSClaudiu Manoil static int gfar_poll_tx(struct napi_struct *napi, int budget)
2749aeb12c5eSClaudiu Manoil {
2750aeb12c5eSClaudiu Manoil 	struct gfar_priv_grp *gfargrp =
2751aeb12c5eSClaudiu Manoil 		container_of(napi, struct gfar_priv_grp, napi_tx);
2752aeb12c5eSClaudiu Manoil 	struct gfar_private *priv = gfargrp->priv;
2753aeb12c5eSClaudiu Manoil 	struct gfar __iomem *regs = gfargrp->regs;
2754aeb12c5eSClaudiu Manoil 	struct gfar_priv_tx_q *tx_queue = NULL;
2755aeb12c5eSClaudiu Manoil 	int has_tx_work = 0;
2756aeb12c5eSClaudiu Manoil 	int i;
2757aeb12c5eSClaudiu Manoil 
2758aeb12c5eSClaudiu Manoil 	/* Clear IEVENT, so interrupts aren't called again
2759aeb12c5eSClaudiu Manoil 	 * because of the packets that have already arrived
2760aeb12c5eSClaudiu Manoil 	 */
2761aeb12c5eSClaudiu Manoil 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2762aeb12c5eSClaudiu Manoil 
2763aeb12c5eSClaudiu Manoil 	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2764aeb12c5eSClaudiu Manoil 		tx_queue = priv->tx_queue[i];
2765aeb12c5eSClaudiu Manoil 		/* run Tx cleanup to completion */
2766aeb12c5eSClaudiu Manoil 		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2767aeb12c5eSClaudiu Manoil 			gfar_clean_tx_ring(tx_queue);
2768aeb12c5eSClaudiu Manoil 			has_tx_work = 1;
2769aeb12c5eSClaudiu Manoil 		}
2770aeb12c5eSClaudiu Manoil 	}
2771aeb12c5eSClaudiu Manoil 
2772aeb12c5eSClaudiu Manoil 	if (!has_tx_work) {
2773aeb12c5eSClaudiu Manoil 		u32 imask;
2774aeb12c5eSClaudiu Manoil 		napi_complete(napi);
2775aeb12c5eSClaudiu Manoil 
2776aeb12c5eSClaudiu Manoil 		spin_lock_irq(&gfargrp->grplock);
2777aeb12c5eSClaudiu Manoil 		imask = gfar_read(&regs->imask);
2778aeb12c5eSClaudiu Manoil 		imask |= IMASK_TX_DEFAULT;
2779aeb12c5eSClaudiu Manoil 		gfar_write(&regs->imask, imask);
2780aeb12c5eSClaudiu Manoil 		spin_unlock_irq(&gfargrp->grplock);
2781aeb12c5eSClaudiu Manoil 	}
2782aeb12c5eSClaudiu Manoil 
2783aeb12c5eSClaudiu Manoil 	return 0;
2784aeb12c5eSClaudiu Manoil }
2785aeb12c5eSClaudiu Manoil 
27867d993c5fSArseny Solokha /* GFAR error interrupt handler */
27877d993c5fSArseny Solokha static irqreturn_t gfar_error(int irq, void *grp_id)
27887d993c5fSArseny Solokha {
27897d993c5fSArseny Solokha 	struct gfar_priv_grp *gfargrp = grp_id;
27907d993c5fSArseny Solokha 	struct gfar __iomem *regs = gfargrp->regs;
27917d993c5fSArseny Solokha 	struct gfar_private *priv = gfargrp->priv;
27927d993c5fSArseny Solokha 	struct net_device *dev = priv->ndev;
27937d993c5fSArseny Solokha 
27947d993c5fSArseny Solokha 	/* Save ievent for future reference */
27957d993c5fSArseny Solokha 	u32 events = gfar_read(&regs->ievent);
27967d993c5fSArseny Solokha 
27977d993c5fSArseny Solokha 	/* Clear IEVENT */
27987d993c5fSArseny Solokha 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
27997d993c5fSArseny Solokha 
28007d993c5fSArseny Solokha 	/* Magic Packet is not an error. */
28017d993c5fSArseny Solokha 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
28027d993c5fSArseny Solokha 	    (events & IEVENT_MAG))
28037d993c5fSArseny Solokha 		events &= ~IEVENT_MAG;
28047d993c5fSArseny Solokha 
28057d993c5fSArseny Solokha 	/* Log the error details if error debugging is enabled */
28067d993c5fSArseny Solokha 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
28077d993c5fSArseny Solokha 		netdev_dbg(dev,
28087d993c5fSArseny Solokha 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
28097d993c5fSArseny Solokha 			   events, gfar_read(&regs->imask));
28107d993c5fSArseny Solokha 
28117d993c5fSArseny Solokha 	/* Update the error counters */
28127d993c5fSArseny Solokha 	if (events & IEVENT_TXE) {
28137d993c5fSArseny Solokha 		dev->stats.tx_errors++;
28147d993c5fSArseny Solokha 
28157d993c5fSArseny Solokha 		if (events & IEVENT_LC)
28167d993c5fSArseny Solokha 			dev->stats.tx_window_errors++;
28177d993c5fSArseny Solokha 		if (events & IEVENT_CRL)
28187d993c5fSArseny Solokha 			dev->stats.tx_aborted_errors++;
28197d993c5fSArseny Solokha 		if (events & IEVENT_XFUN) {
28207d993c5fSArseny Solokha 			netif_dbg(priv, tx_err, dev,
28217d993c5fSArseny Solokha 				  "TX FIFO underrun, packet dropped\n");
28227d993c5fSArseny Solokha 			dev->stats.tx_dropped++;
28237d993c5fSArseny Solokha 			atomic64_inc(&priv->extra_stats.tx_underrun);
28247d993c5fSArseny Solokha 
28257d993c5fSArseny Solokha 			schedule_work(&priv->reset_task);
28267d993c5fSArseny Solokha 		}
28277d993c5fSArseny Solokha 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
28287d993c5fSArseny Solokha 	}
28297d993c5fSArseny Solokha 	if (events & IEVENT_BSY) {
28307d993c5fSArseny Solokha 		dev->stats.rx_over_errors++;
28317d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.rx_bsy);
28327d993c5fSArseny Solokha 
28337d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
28347d993c5fSArseny Solokha 			  gfar_read(&regs->rstat));
28357d993c5fSArseny Solokha 	}
28367d993c5fSArseny Solokha 	if (events & IEVENT_BABR) {
28377d993c5fSArseny Solokha 		dev->stats.rx_errors++;
28387d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.rx_babr);
28397d993c5fSArseny Solokha 
28407d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
28417d993c5fSArseny Solokha 	}
28427d993c5fSArseny Solokha 	if (events & IEVENT_EBERR) {
28437d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.eberr);
28447d993c5fSArseny Solokha 		netif_dbg(priv, rx_err, dev, "bus error\n");
28457d993c5fSArseny Solokha 	}
28467d993c5fSArseny Solokha 	if (events & IEVENT_RXC)
28477d993c5fSArseny Solokha 		netif_dbg(priv, rx_status, dev, "control frame\n");
28487d993c5fSArseny Solokha 
28497d993c5fSArseny Solokha 	if (events & IEVENT_BABT) {
28507d993c5fSArseny Solokha 		atomic64_inc(&priv->extra_stats.tx_babt);
28517d993c5fSArseny Solokha 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
28527d993c5fSArseny Solokha 	}
28537d993c5fSArseny Solokha 	return IRQ_HANDLED;
28547d993c5fSArseny Solokha }
28557d993c5fSArseny Solokha 
28567d993c5fSArseny Solokha /* The interrupt handler for devices with one interrupt */
28577d993c5fSArseny Solokha static irqreturn_t gfar_interrupt(int irq, void *grp_id)
28587d993c5fSArseny Solokha {
28597d993c5fSArseny Solokha 	struct gfar_priv_grp *gfargrp = grp_id;
28607d993c5fSArseny Solokha 
28617d993c5fSArseny Solokha 	/* Save ievent for future reference */
28627d993c5fSArseny Solokha 	u32 events = gfar_read(&gfargrp->regs->ievent);
28637d993c5fSArseny Solokha 
28647d993c5fSArseny Solokha 	/* Check for reception */
28657d993c5fSArseny Solokha 	if (events & IEVENT_RX_MASK)
28667d993c5fSArseny Solokha 		gfar_receive(irq, grp_id);
28677d993c5fSArseny Solokha 
28687d993c5fSArseny Solokha 	/* Check for transmit completion */
28697d993c5fSArseny Solokha 	if (events & IEVENT_TX_MASK)
28707d993c5fSArseny Solokha 		gfar_transmit(irq, grp_id);
28717d993c5fSArseny Solokha 
28727d993c5fSArseny Solokha 	/* Check for errors */
28737d993c5fSArseny Solokha 	if (events & IEVENT_ERR_MASK)
28747d993c5fSArseny Solokha 		gfar_error(irq, grp_id);
28757d993c5fSArseny Solokha 
28767d993c5fSArseny Solokha 	return IRQ_HANDLED;
28777d993c5fSArseny Solokha }
2878aeb12c5eSClaudiu Manoil 
2879ec21e2ecSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
28800977f817SJan Ceuleers /* Polling 'interrupt' - used by things like netconsole to send skbs
2881ec21e2ecSJeff Kirsher  * without having to re-enable interrupts. It's not called while
2882ec21e2ecSJeff Kirsher  * the interrupt routine is executing.
2883ec21e2ecSJeff Kirsher  */
2884ec21e2ecSJeff Kirsher static void gfar_netpoll(struct net_device *dev)
2885ec21e2ecSJeff Kirsher {
2886ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
28873a2e16c8SJan Ceuleers 	int i;
2888ec21e2ecSJeff Kirsher 
2889ec21e2ecSJeff Kirsher 	/* If the device has multiple interrupts, run tx/rx */
2890ec21e2ecSJeff Kirsher 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2891ec21e2ecSJeff Kirsher 		for (i = 0; i < priv->num_grps; i++) {
289262ed839dSPaul Gortmaker 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
289362ed839dSPaul Gortmaker 
289462ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, TX)->irq);
289562ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, RX)->irq);
289662ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, ER)->irq);
289762ed839dSPaul Gortmaker 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
289862ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, ER)->irq);
289962ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, RX)->irq);
290062ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, TX)->irq);
2901ec21e2ecSJeff Kirsher 		}
2902ec21e2ecSJeff Kirsher 	} else {
2903ec21e2ecSJeff Kirsher 		for (i = 0; i < priv->num_grps; i++) {
290462ed839dSPaul Gortmaker 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
290562ed839dSPaul Gortmaker 
290662ed839dSPaul Gortmaker 			disable_irq(gfar_irq(grp, TX)->irq);
290762ed839dSPaul Gortmaker 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
290862ed839dSPaul Gortmaker 			enable_irq(gfar_irq(grp, TX)->irq);
2909ec21e2ecSJeff Kirsher 		}
2910ec21e2ecSJeff Kirsher 	}
2911ec21e2ecSJeff Kirsher }
2912ec21e2ecSJeff Kirsher #endif
2913ec21e2ecSJeff Kirsher 
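/* Release the Error, Tx and Rx interrupt lines of a group */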
29147d993c5fSArseny Solokha static void free_grp_irqs(struct gfar_priv_grp *grp)
2915ec21e2ecSJeff Kirsher {
29167d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, TX)->irq, grp);
29177d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, RX)->irq, grp);
29187d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, ER)->irq, grp);
2919ec21e2ecSJeff Kirsher }
2920ec21e2ecSJeff Kirsher 
29217d993c5fSArseny Solokha static int register_grp_irqs(struct gfar_priv_grp *grp)
29227d993c5fSArseny Solokha {
29237d993c5fSArseny Solokha 	struct gfar_private *priv = grp->priv;
29247d993c5fSArseny Solokha 	struct net_device *dev = priv->ndev;
29257d993c5fSArseny Solokha 	int err;
29267d993c5fSArseny Solokha 
29277d993c5fSArseny Solokha 	/* If the device has multiple interrupts, register for
29287d993c5fSArseny Solokha 	 * them.  Otherwise, only register for the one
2929ec21e2ecSJeff Kirsher 	 */
29307d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
29317d993c5fSArseny Solokha 		/* Install our interrupt handlers for Error,
29327d993c5fSArseny Solokha 		 * Transmit, and Receive
29337d993c5fSArseny Solokha 		 */
29347d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
29357d993c5fSArseny Solokha 				  gfar_irq(grp, ER)->name, grp);
29367d993c5fSArseny Solokha 		if (err < 0) {
29377d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
29387d993c5fSArseny Solokha 				  gfar_irq(grp, ER)->irq);
29397d993c5fSArseny Solokha 
29407d993c5fSArseny Solokha 			goto err_irq_fail;
29417d993c5fSArseny Solokha 		}
29427d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, ER)->irq);
29437d993c5fSArseny Solokha 
29447d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
29457d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->name, grp);
29467d993c5fSArseny Solokha 		if (err < 0) {
29477d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
29487d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->irq);
29497d993c5fSArseny Solokha 			goto tx_irq_fail;
29507d993c5fSArseny Solokha 		}
29517d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
29527d993c5fSArseny Solokha 				  gfar_irq(grp, RX)->name, grp);
29537d993c5fSArseny Solokha 		if (err < 0) {
29547d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
29557d993c5fSArseny Solokha 				  gfar_irq(grp, RX)->irq);
29567d993c5fSArseny Solokha 			goto rx_irq_fail;
29577d993c5fSArseny Solokha 		}
29587d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, RX)->irq);
29597d993c5fSArseny Solokha 
29607d993c5fSArseny Solokha 	} else {
29617d993c5fSArseny Solokha 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
29627d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->name, grp);
29637d993c5fSArseny Solokha 		if (err < 0) {
29647d993c5fSArseny Solokha 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
29657d993c5fSArseny Solokha 				  gfar_irq(grp, TX)->irq);
29667d993c5fSArseny Solokha 			goto err_irq_fail;
29677d993c5fSArseny Solokha 		}
29687d993c5fSArseny Solokha 		enable_irq_wake(gfar_irq(grp, TX)->irq);
29697d993c5fSArseny Solokha 	}
29707d993c5fSArseny Solokha 
29717d993c5fSArseny Solokha 	return 0;
29727d993c5fSArseny Solokha 
29737d993c5fSArseny Solokha rx_irq_fail:
29747d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, TX)->irq, grp);
29757d993c5fSArseny Solokha tx_irq_fail:
29767d993c5fSArseny Solokha 	free_irq(gfar_irq(grp, ER)->irq, grp);
29777d993c5fSArseny Solokha err_irq_fail:
29787d993c5fSArseny Solokha 	return err;
29797d993c5fSArseny Solokha 
29807d993c5fSArseny Solokha }
29817d993c5fSArseny Solokha 
29827d993c5fSArseny Solokha static void gfar_free_irq(struct gfar_private *priv)
29837d993c5fSArseny Solokha {
29847d993c5fSArseny Solokha 	int i;
29857d993c5fSArseny Solokha 
29867d993c5fSArseny Solokha 	/* Free the IRQs */
29877d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
29887d993c5fSArseny Solokha 		for (i = 0; i < priv->num_grps; i++)
29897d993c5fSArseny Solokha 			free_grp_irqs(&priv->gfargrp[i]);
29907d993c5fSArseny Solokha 	} else {
29917d993c5fSArseny Solokha 		for (i = 0; i < priv->num_grps; i++)
29927d993c5fSArseny Solokha 			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
29937d993c5fSArseny Solokha 				 &priv->gfargrp[i]);
29947d993c5fSArseny Solokha 	}
29957d993c5fSArseny Solokha }
29967d993c5fSArseny Solokha 
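/* Request the IRQ lines for every interrupt group, releasing the
 * already-acquired groups on failure.
 */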
29977d993c5fSArseny Solokha static int gfar_request_irq(struct gfar_private *priv)
29987d993c5fSArseny Solokha {
29997d993c5fSArseny Solokha 	int err, i, j;
30007d993c5fSArseny Solokha 
30017d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
30027d993c5fSArseny Solokha 		err = register_grp_irqs(&priv->gfargrp[i]);
30037d993c5fSArseny Solokha 		if (err) {
30047d993c5fSArseny Solokha 			for (j = 0; j < i; j++)
30057d993c5fSArseny Solokha 				free_grp_irqs(&priv->gfargrp[j]);
30067d993c5fSArseny Solokha 			return err;
30077d993c5fSArseny Solokha 		}
30087d993c5fSArseny Solokha 	}
30097d993c5fSArseny Solokha 
30107d993c5fSArseny Solokha 	return 0;
30117d993c5fSArseny Solokha }
30127d993c5fSArseny Solokha 
30137d993c5fSArseny Solokha /* Called when something needs to use the ethernet device
30147d993c5fSArseny Solokha  * Returns 0 for success.
30157d993c5fSArseny Solokha  */
30167d993c5fSArseny Solokha static int gfar_enet_open(struct net_device *dev)
3017ec21e2ecSJeff Kirsher {
3018ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
30197d993c5fSArseny Solokha 	int err;
3020ec21e2ecSJeff Kirsher 
30217d993c5fSArseny Solokha 	err = init_phy(dev);
30227d993c5fSArseny Solokha 	if (err)
30237d993c5fSArseny Solokha 		return err;
30247d993c5fSArseny Solokha 
30257d993c5fSArseny Solokha 	err = gfar_request_irq(priv);
30267d993c5fSArseny Solokha 	if (err)
30277d993c5fSArseny Solokha 		return err;
30287d993c5fSArseny Solokha 
30297d993c5fSArseny Solokha 	return startup_gfar(dev);
30347d993c5fSArseny Solokha }
30357d993c5fSArseny Solokha 
30367d993c5fSArseny Solokha /* Stops the kernel queue and halts the controller */
30377d993c5fSArseny Solokha static int gfar_close(struct net_device *dev)
30387d993c5fSArseny Solokha {
30397d993c5fSArseny Solokha 	struct gfar_private *priv = netdev_priv(dev);
30407d993c5fSArseny Solokha 
30417d993c5fSArseny Solokha 	cancel_work_sync(&priv->reset_task);
30427d993c5fSArseny Solokha 	stop_gfar(dev);
30437d993c5fSArseny Solokha 
30447d993c5fSArseny Solokha 	/* Disconnect from the PHY */
30457d993c5fSArseny Solokha 	phy_disconnect(dev->phydev);
30467d993c5fSArseny Solokha 
30477d993c5fSArseny Solokha 	gfar_free_irq(priv);
30487d993c5fSArseny Solokha 
30497d993c5fSArseny Solokha 	return 0;
30507d993c5fSArseny Solokha }
30517d993c5fSArseny Solokha 
30527d993c5fSArseny Solokha /* Clears each of the exact match registers to zero, so they
30537d993c5fSArseny Solokha  * don't interfere with normal reception
30547d993c5fSArseny Solokha  */
30557d993c5fSArseny Solokha static void gfar_clear_exact_match(struct net_device *dev)
30567d993c5fSArseny Solokha {
30577d993c5fSArseny Solokha 	int idx;
30587d993c5fSArseny Solokha 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
30597d993c5fSArseny Solokha 
30607d993c5fSArseny Solokha 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
30617d993c5fSArseny Solokha 		gfar_set_mac_for_addr(dev, idx, zero_arr);
3062ec21e2ecSJeff Kirsher }
3063ec21e2ecSJeff Kirsher 
3064ec21e2ecSJeff Kirsher /* Update the hash table based on the current list of multicast
3065ec21e2ecSJeff Kirsher  * addresses we subscribe to.  Also, change the promiscuity of
3066ec21e2ecSJeff Kirsher  * the device based on the flags (this function is called
30670977f817SJan Ceuleers  * whenever dev->flags is changed).
30680977f817SJan Ceuleers  */
3069ec21e2ecSJeff Kirsher static void gfar_set_multi(struct net_device *dev)
3070ec21e2ecSJeff Kirsher {
3071ec21e2ecSJeff Kirsher 	struct netdev_hw_addr *ha;
3072ec21e2ecSJeff Kirsher 	struct gfar_private *priv = netdev_priv(dev);
3073ec21e2ecSJeff Kirsher 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3074ec21e2ecSJeff Kirsher 	u32 tempval;
3075ec21e2ecSJeff Kirsher 
3076ec21e2ecSJeff Kirsher 	if (dev->flags & IFF_PROMISC) {
3077ec21e2ecSJeff Kirsher 		/* Set RCTRL to PROM */
3078ec21e2ecSJeff Kirsher 		tempval = gfar_read(&regs->rctrl);
3079ec21e2ecSJeff Kirsher 		tempval |= RCTRL_PROM;
3080ec21e2ecSJeff Kirsher 		gfar_write(&regs->rctrl, tempval);
3081ec21e2ecSJeff Kirsher 	} else {
3082ec21e2ecSJeff Kirsher 		/* Set RCTRL to not PROM */
3083ec21e2ecSJeff Kirsher 		tempval = gfar_read(&regs->rctrl);
3084ec21e2ecSJeff Kirsher 		tempval &= ~(RCTRL_PROM);
3085ec21e2ecSJeff Kirsher 		gfar_write(&regs->rctrl, tempval);
3086ec21e2ecSJeff Kirsher 	}
3087ec21e2ecSJeff Kirsher 
3088ec21e2ecSJeff Kirsher 	if (dev->flags & IFF_ALLMULTI) {
3089ec21e2ecSJeff Kirsher 		/* Set the hash to rx all multicast frames */
3090ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr0, 0xffffffff);
3091ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr1, 0xffffffff);
3092ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr2, 0xffffffff);
3093ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr3, 0xffffffff);
3094ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr4, 0xffffffff);
3095ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr5, 0xffffffff);
3096ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr6, 0xffffffff);
3097ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr7, 0xffffffff);
3098ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr0, 0xffffffff);
3099ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr1, 0xffffffff);
3100ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr2, 0xffffffff);
3101ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr3, 0xffffffff);
3102ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr4, 0xffffffff);
3103ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr5, 0xffffffff);
3104ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr6, 0xffffffff);
3105ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr7, 0xffffffff);
3106ec21e2ecSJeff Kirsher 	} else {
3107ec21e2ecSJeff Kirsher 		int em_num;
3108ec21e2ecSJeff Kirsher 		int idx;
3109ec21e2ecSJeff Kirsher 
3110ec21e2ecSJeff Kirsher 		/* zero out the hash */
3111ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr0, 0x0);
3112ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr1, 0x0);
3113ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr2, 0x0);
3114ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr3, 0x0);
3115ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr4, 0x0);
3116ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr5, 0x0);
3117ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr6, 0x0);
3118ec21e2ecSJeff Kirsher 		gfar_write(&regs->igaddr7, 0x0);
3119ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr0, 0x0);
3120ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr1, 0x0);
3121ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr2, 0x0);
3122ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr3, 0x0);
3123ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr4, 0x0);
3124ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr5, 0x0);
3125ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr6, 0x0);
3126ec21e2ecSJeff Kirsher 		gfar_write(&regs->gaddr7, 0x0);
3127ec21e2ecSJeff Kirsher 
3128ec21e2ecSJeff Kirsher 		/* If we have extended hash tables, we need to
3129ec21e2ecSJeff Kirsher 		 * clear the exact match registers to prepare for
31300977f817SJan Ceuleers 		 * setting them
31310977f817SJan Ceuleers 		 */
3132ec21e2ecSJeff Kirsher 		if (priv->extended_hash) {
3133ec21e2ecSJeff Kirsher 			em_num = GFAR_EM_NUM + 1;
3134ec21e2ecSJeff Kirsher 			gfar_clear_exact_match(dev);
3135ec21e2ecSJeff Kirsher 			idx = 1;
3136ec21e2ecSJeff Kirsher 		} else {
3137ec21e2ecSJeff Kirsher 			idx = 0;
3138ec21e2ecSJeff Kirsher 			em_num = 0;
3139ec21e2ecSJeff Kirsher 		}
3140ec21e2ecSJeff Kirsher 
3141ec21e2ecSJeff Kirsher 		if (netdev_mc_empty(dev))
3142ec21e2ecSJeff Kirsher 			return;
3143ec21e2ecSJeff Kirsher 
3144ec21e2ecSJeff Kirsher 		/* Parse the list, and set the appropriate bits */
3145ec21e2ecSJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
3146ec21e2ecSJeff Kirsher 			if (idx < em_num) {
3147ec21e2ecSJeff Kirsher 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3148ec21e2ecSJeff Kirsher 				idx++;
3149ec21e2ecSJeff Kirsher 			} else
3150ec21e2ecSJeff Kirsher 				gfar_set_hash_for_addr(dev, ha->addr);
3151ec21e2ecSJeff Kirsher 		}
3152ec21e2ecSJeff Kirsher 	}
3153ec21e2ecSJeff Kirsher }
3154ec21e2ecSJeff Kirsher 
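/* Soft-reset the MAC and reprogram frame length limits, offloads,
 * address filters and interrupt coalescing.
 */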
31557d993c5fSArseny Solokha void gfar_mac_reset(struct gfar_private *priv)
31566ce29b0eSClaudiu Manoil {
31576ce29b0eSClaudiu Manoil 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
31587d993c5fSArseny Solokha 	u32 tempval;
31596ce29b0eSClaudiu Manoil 
31607d993c5fSArseny Solokha 	/* Reset MAC layer */
31617d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
31626ce29b0eSClaudiu Manoil 
31637d993c5fSArseny Solokha 	/* We need to delay at least 3 TX clocks */
31647d993c5fSArseny Solokha 	udelay(3);
31656ce29b0eSClaudiu Manoil 
31667d993c5fSArseny Solokha 	/* the soft reset bit is not self-resetting, so we need to
31677d993c5fSArseny Solokha 	 * clear it before resuming normal operation
31686ce29b0eSClaudiu Manoil 	 */
31697d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, 0);
31706ce29b0eSClaudiu Manoil 
31717d993c5fSArseny Solokha 	udelay(3);
31726ce29b0eSClaudiu Manoil 
31737d993c5fSArseny Solokha 	gfar_rx_offload_en(priv);
31746ce29b0eSClaudiu Manoil 
31757d993c5fSArseny Solokha 	/* Initialize the max receive frame/buffer lengths */
31767d993c5fSArseny Solokha 	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
31777d993c5fSArseny Solokha 	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3178b4b67f26SScott Wood 
31797d993c5fSArseny Solokha 	/* Initialize the Minimum Frame Length Register */
31807d993c5fSArseny Solokha 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
318145b679c9SMatei Pavaluca 
31827d993c5fSArseny Solokha 	/* Initialize MACCFG2. */
31837d993c5fSArseny Solokha 	tempval = MACCFG2_INIT_SETTINGS;
318445b679c9SMatei Pavaluca 
31857d993c5fSArseny Solokha 	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
31867d993c5fSArseny Solokha 	 * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
31877d993c5fSArseny Solokha 	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
31887d993c5fSArseny Solokha 	 */
31897d993c5fSArseny Solokha 	if (gfar_has_errata(priv, GFAR_ERRATA_74))
31907d993c5fSArseny Solokha 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
319145b679c9SMatei Pavaluca 
31926ce29b0eSClaudiu Manoil 	gfar_write(&regs->maccfg2, tempval);
31936ce29b0eSClaudiu Manoil 
31947d993c5fSArseny Solokha 	/* Clear mac addr hash registers */
31957d993c5fSArseny Solokha 	gfar_write(&regs->igaddr0, 0);
31967d993c5fSArseny Solokha 	gfar_write(&regs->igaddr1, 0);
31977d993c5fSArseny Solokha 	gfar_write(&regs->igaddr2, 0);
31987d993c5fSArseny Solokha 	gfar_write(&regs->igaddr3, 0);
31997d993c5fSArseny Solokha 	gfar_write(&regs->igaddr4, 0);
32007d993c5fSArseny Solokha 	gfar_write(&regs->igaddr5, 0);
32017d993c5fSArseny Solokha 	gfar_write(&regs->igaddr6, 0);
32027d993c5fSArseny Solokha 	gfar_write(&regs->igaddr7, 0);
32036ce29b0eSClaudiu Manoil 
32047d993c5fSArseny Solokha 	gfar_write(&regs->gaddr0, 0);
32057d993c5fSArseny Solokha 	gfar_write(&regs->gaddr1, 0);
32067d993c5fSArseny Solokha 	gfar_write(&regs->gaddr2, 0);
32077d993c5fSArseny Solokha 	gfar_write(&regs->gaddr3, 0);
32087d993c5fSArseny Solokha 	gfar_write(&regs->gaddr4, 0);
32097d993c5fSArseny Solokha 	gfar_write(&regs->gaddr5, 0);
32107d993c5fSArseny Solokha 	gfar_write(&regs->gaddr6, 0);
32117d993c5fSArseny Solokha 	gfar_write(&regs->gaddr7, 0);
32127d993c5fSArseny Solokha 
32137d993c5fSArseny Solokha 	if (priv->extended_hash)
32147d993c5fSArseny Solokha 		gfar_clear_exact_match(priv->ndev);
32157d993c5fSArseny Solokha 
32167d993c5fSArseny Solokha 	gfar_mac_rx_config(priv);
32177d993c5fSArseny Solokha 
32187d993c5fSArseny Solokha 	gfar_mac_tx_config(priv);
32197d993c5fSArseny Solokha 
32207d993c5fSArseny Solokha 	gfar_set_mac_address(priv->ndev);
32217d993c5fSArseny Solokha 
32227d993c5fSArseny Solokha 	gfar_set_multi(priv->ndev);
32237d993c5fSArseny Solokha 
32247d993c5fSArseny Solokha 	/* clear ievent and imask before configuring coalescing */
32257d993c5fSArseny Solokha 	gfar_ints_disable(priv);
32267d993c5fSArseny Solokha 
32277d993c5fSArseny Solokha 	/* Configure the coalescing support */
32287d993c5fSArseny Solokha 	gfar_configure_coalescing_all(priv);
32297d993c5fSArseny Solokha }
32307d993c5fSArseny Solokha 
32317d993c5fSArseny Solokha static void gfar_hw_init(struct gfar_private *priv)
32327d993c5fSArseny Solokha {
32337d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
32347d993c5fSArseny Solokha 	u32 attrs;
32357d993c5fSArseny Solokha 
32367d993c5fSArseny Solokha 	/* Stop the DMA engine now, in case it was running before
32377d993c5fSArseny Solokha 	 * (the firmware could have used it and left it running).
32387d993c5fSArseny Solokha 	 */
32397d993c5fSArseny Solokha 	gfar_halt(priv);
32407d993c5fSArseny Solokha 
32417d993c5fSArseny Solokha 	gfar_mac_reset(priv);
32427d993c5fSArseny Solokha 
32437d993c5fSArseny Solokha 	/* Zero out the RMON MIB registers if the device has them */
32447d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
32457d993c5fSArseny Solokha 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
32467d993c5fSArseny Solokha 
32477d993c5fSArseny Solokha 		/* Mask off the CAM interrupts */
32487d993c5fSArseny Solokha 		gfar_write(&regs->rmon.cam1, 0xffffffff);
32497d993c5fSArseny Solokha 		gfar_write(&regs->rmon.cam2, 0xffffffff);
32507d993c5fSArseny Solokha 	}
32517d993c5fSArseny Solokha 
32527d993c5fSArseny Solokha 	/* Initialize ECNTRL */
32537d993c5fSArseny Solokha 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
32547d993c5fSArseny Solokha 
32557d993c5fSArseny Solokha 	/* Set the extraction length and index */
32567d993c5fSArseny Solokha 	attrs = ATTRELI_EL(priv->rx_stash_size) |
32577d993c5fSArseny Solokha 		ATTRELI_EI(priv->rx_stash_index);
32587d993c5fSArseny Solokha 
32597d993c5fSArseny Solokha 	gfar_write(&regs->attreli, attrs);
32607d993c5fSArseny Solokha 
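	/* Stashing (enabled below via the ATTR register) pre-loads buffer
	 * descriptors and the first rx_stash_size bytes of each frame
	 * (starting at rx_stash_index) into the CPU cache, presumably to
	 * reduce packet-processing latency on these SoCs.
	 */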
32617d993c5fSArseny Solokha 	/* Start with defaults, and add stashing
32627d993c5fSArseny Solokha 	 * depending on driver parameters
32637d993c5fSArseny Solokha 	 */
32647d993c5fSArseny Solokha 	attrs = ATTR_INIT_SETTINGS;
32657d993c5fSArseny Solokha 
32667d993c5fSArseny Solokha 	if (priv->bd_stash_en)
32677d993c5fSArseny Solokha 		attrs |= ATTR_BDSTASH;
32687d993c5fSArseny Solokha 
32697d993c5fSArseny Solokha 	if (priv->rx_stash_size != 0)
32707d993c5fSArseny Solokha 		attrs |= ATTR_BUFSTASH;
32717d993c5fSArseny Solokha 
32727d993c5fSArseny Solokha 	gfar_write(&regs->attr, attrs);
32737d993c5fSArseny Solokha 
32747d993c5fSArseny Solokha 	/* FIFO configs */
32757d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
32767d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
32777d993c5fSArseny Solokha 	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
32787d993c5fSArseny Solokha 
32797d993c5fSArseny Solokha 	/* Program the interrupt steering regs, only for MG devices */
32807d993c5fSArseny Solokha 	if (priv->num_grps > 1)
32817d993c5fSArseny Solokha 		gfar_write_isrg(priv);
32827d993c5fSArseny Solokha }
32837d993c5fSArseny Solokha 
32847d993c5fSArseny Solokha static const struct net_device_ops gfar_netdev_ops = {
32857d993c5fSArseny Solokha 	.ndo_open = gfar_enet_open,
32867d993c5fSArseny Solokha 	.ndo_start_xmit = gfar_start_xmit,
32877d993c5fSArseny Solokha 	.ndo_stop = gfar_close,
32887d993c5fSArseny Solokha 	.ndo_change_mtu = gfar_change_mtu,
32897d993c5fSArseny Solokha 	.ndo_set_features = gfar_set_features,
32907d993c5fSArseny Solokha 	.ndo_set_rx_mode = gfar_set_multi,
32917d993c5fSArseny Solokha 	.ndo_tx_timeout = gfar_timeout,
32927d993c5fSArseny Solokha 	.ndo_do_ioctl = gfar_ioctl,
32937d993c5fSArseny Solokha 	.ndo_get_stats = gfar_get_stats,
32947d993c5fSArseny Solokha 	.ndo_change_carrier = fixed_phy_change_carrier,
32957d993c5fSArseny Solokha 	.ndo_set_mac_address = gfar_set_mac_addr,
32967d993c5fSArseny Solokha 	.ndo_validate_addr = eth_validate_addr,
32977d993c5fSArseny Solokha #ifdef CONFIG_NET_POLL_CONTROLLER
32987d993c5fSArseny Solokha 	.ndo_poll_controller = gfar_netpoll,
32997d993c5fSArseny Solokha #endif
33007d993c5fSArseny Solokha };
33017d993c5fSArseny Solokha 
33027d993c5fSArseny Solokha /* Set up the ethernet device structure, private data,
33037d993c5fSArseny Solokha  * and anything else we need before we start
33047d993c5fSArseny Solokha  */
33057d993c5fSArseny Solokha static int gfar_probe(struct platform_device *ofdev)
33067d993c5fSArseny Solokha {
33077d993c5fSArseny Solokha 	struct device_node *np = ofdev->dev.of_node;
33087d993c5fSArseny Solokha 	struct net_device *dev = NULL;
33097d993c5fSArseny Solokha 	struct gfar_private *priv = NULL;
33107d993c5fSArseny Solokha 	int err = 0, i;
33117d993c5fSArseny Solokha 
33127d993c5fSArseny Solokha 	err = gfar_of_init(ofdev, &dev);
33137d993c5fSArseny Solokha 
33147d993c5fSArseny Solokha 	if (err)
33157d993c5fSArseny Solokha 		return err;
33167d993c5fSArseny Solokha 
33177d993c5fSArseny Solokha 	priv = netdev_priv(dev);
33187d993c5fSArseny Solokha 	priv->ndev = dev;
33197d993c5fSArseny Solokha 	priv->ofdev = ofdev;
33207d993c5fSArseny Solokha 	priv->dev = &ofdev->dev;
33217d993c5fSArseny Solokha 	SET_NETDEV_DEV(dev, &ofdev->dev);
33227d993c5fSArseny Solokha 
33237d993c5fSArseny Solokha 	INIT_WORK(&priv->reset_task, gfar_reset_task);
33247d993c5fSArseny Solokha 
33257d993c5fSArseny Solokha 	platform_set_drvdata(ofdev, priv);
33267d993c5fSArseny Solokha 
33277d993c5fSArseny Solokha 	gfar_detect_errata(priv);
33287d993c5fSArseny Solokha 
33297d993c5fSArseny Solokha 	/* Set the dev->base_addr to the gfar reg region */
33307d993c5fSArseny Solokha 	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
33317d993c5fSArseny Solokha 
33327d993c5fSArseny Solokha 	/* Fill in the dev structure */
33337d993c5fSArseny Solokha 	dev->watchdog_timeo = TX_TIMEOUT;
33347d993c5fSArseny Solokha 	/* MTU range: 50 - 9586 */
33357d993c5fSArseny Solokha 	dev->mtu = 1500;
33367d993c5fSArseny Solokha 	dev->min_mtu = 50;
33377d993c5fSArseny Solokha 	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
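	/* Assuming GFAR_JUMBO_FRAME_SIZE is 9600 and ETH_HLEN is 14, max_mtu
	 * works out to the 9586 quoted in the range comment above.
	 */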
33387d993c5fSArseny Solokha 	dev->netdev_ops = &gfar_netdev_ops;
33397d993c5fSArseny Solokha 	dev->ethtool_ops = &gfar_ethtool_ops;
33407d993c5fSArseny Solokha 
33417d993c5fSArseny Solokha 	/* Register NAPI for each interrupt group */
33427d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
33437d993c5fSArseny Solokha 		if (priv->poll_mode == GFAR_SQ_POLLING) {
33447d993c5fSArseny Solokha 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
33457d993c5fSArseny Solokha 				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
33467d993c5fSArseny Solokha 			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
33477d993c5fSArseny Solokha 				       gfar_poll_tx_sq, 2);
33487d993c5fSArseny Solokha 		} else {
33497d993c5fSArseny Solokha 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
33507d993c5fSArseny Solokha 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
33517d993c5fSArseny Solokha 			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
33527d993c5fSArseny Solokha 				       gfar_poll_tx, 2);
33537d993c5fSArseny Solokha 		}
33547d993c5fSArseny Solokha 	}
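	/* In GFAR_SQ_POLLING mode each group polls only its single Rx/Tx queue
	 * pair; the gfar_poll_rx/gfar_poll_tx handlers instead walk every queue
	 * owned by the group (inferred from the handler naming above).
	 */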
33557d993c5fSArseny Solokha 
33567d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
33577d993c5fSArseny Solokha 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
33587d993c5fSArseny Solokha 				   NETIF_F_RXCSUM;
33597d993c5fSArseny Solokha 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
33607d993c5fSArseny Solokha 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
33617d993c5fSArseny Solokha 	}
33627d993c5fSArseny Solokha 
33637d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
33647d993c5fSArseny Solokha 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
33657d993c5fSArseny Solokha 				    NETIF_F_HW_VLAN_CTAG_RX;
33667d993c5fSArseny Solokha 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
33677d993c5fSArseny Solokha 	}
33687d993c5fSArseny Solokha 
33697d993c5fSArseny Solokha 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
33707d993c5fSArseny Solokha 
33717d993c5fSArseny Solokha 	gfar_init_addr_hash_table(priv);
33727d993c5fSArseny Solokha 
33737d993c5fSArseny Solokha 	/* Reserve padding for the hardware-inserted receive time stamps,
33747d993c5fSArseny Solokha 	 * plus 2 bytes of padding to ensure CPU alignment.
33757d993c5fSArseny Solokha 	 */
33767d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
33777d993c5fSArseny Solokha 		priv->padding = 8 + DEFAULT_PADDING;
33787d993c5fSArseny Solokha 
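	/* Tx checksum offload and Tx timestamping require a Frame Control Block
	 * (FCB) to be prepended to each frame, hence the extra headroom below
	 * (assuming GMAC_FCB_LEN is the FCB size).
	 */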
33797d993c5fSArseny Solokha 	if (dev->features & NETIF_F_IP_CSUM ||
33807d993c5fSArseny Solokha 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
33817d993c5fSArseny Solokha 		dev->needed_headroom = GMAC_FCB_LEN;
33827d993c5fSArseny Solokha 
33837d993c5fSArseny Solokha 	/* Initializing some of the rx/tx queue level parameters */
33847d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++) {
33857d993c5fSArseny Solokha 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
33867d993c5fSArseny Solokha 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
33877d993c5fSArseny Solokha 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
33887d993c5fSArseny Solokha 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
33897d993c5fSArseny Solokha 	}
33907d993c5fSArseny Solokha 
33917d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++) {
33927d993c5fSArseny Solokha 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
33937d993c5fSArseny Solokha 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
33947d993c5fSArseny Solokha 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
33957d993c5fSArseny Solokha 	}
33967d993c5fSArseny Solokha 
33977d993c5fSArseny Solokha 	/* Always enable rx filer if available */
33987d993c5fSArseny Solokha 	priv->rx_filer_enable =
33997d993c5fSArseny Solokha 	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
34007d993c5fSArseny Solokha 	/* Enable most messages by default: (x << 1) - 1 sets every bit up to and including x */
34017d993c5fSArseny Solokha 	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
34027d993c5fSArseny Solokha 	/* use priority h/w tx queue scheduling for single queue devices */
34037d993c5fSArseny Solokha 	if (priv->num_tx_queues == 1)
34047d993c5fSArseny Solokha 		priv->prio_sched_en = 1;
34057d993c5fSArseny Solokha 
34067d993c5fSArseny Solokha 	set_bit(GFAR_DOWN, &priv->state);
34077d993c5fSArseny Solokha 
34087d993c5fSArseny Solokha 	gfar_hw_init(priv);
34097d993c5fSArseny Solokha 
34107d993c5fSArseny Solokha 	/* Carrier starts down, phylib will bring it up */
34117d993c5fSArseny Solokha 	netif_carrier_off(dev);
34127d993c5fSArseny Solokha 
34137d993c5fSArseny Solokha 	err = register_netdev(dev);
34147d993c5fSArseny Solokha 
34157d993c5fSArseny Solokha 	if (err) {
34167d993c5fSArseny Solokha 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
34177d993c5fSArseny Solokha 		goto register_fail;
34187d993c5fSArseny Solokha 	}
34197d993c5fSArseny Solokha 
34207d993c5fSArseny Solokha 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
34217d993c5fSArseny Solokha 		priv->wol_supported |= GFAR_WOL_MAGIC;
34227d993c5fSArseny Solokha 
34237d993c5fSArseny Solokha 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
34247d993c5fSArseny Solokha 	    priv->rx_filer_enable)
34257d993c5fSArseny Solokha 		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
34267d993c5fSArseny Solokha 
34277d993c5fSArseny Solokha 	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
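	/* Note that priv->wol_supported is used here as a boolean: the device is
	 * marked wakeup-capable if any WoL mode is supported.
	 */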
34287d993c5fSArseny Solokha 
34297d993c5fSArseny Solokha 	/* fill out IRQ number and name fields */
34307d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
34317d993c5fSArseny Solokha 		struct gfar_priv_grp *grp = &priv->gfargrp[i];
34327d993c5fSArseny Solokha 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
34337d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
34347d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_tx");
34357d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
34367d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_rx");
34377d993c5fSArseny Solokha 			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
34387d993c5fSArseny Solokha 				dev->name, "_g", '0' + i, "_er");
34397d993c5fSArseny Solokha 		} else
34407d993c5fSArseny Solokha 			strcpy(gfar_irq(grp, TX)->name, dev->name);
34417d993c5fSArseny Solokha 	}
34427d993c5fSArseny Solokha 
34437d993c5fSArseny Solokha 	/* Initialize the filer table */
34447d993c5fSArseny Solokha 	gfar_init_filer_table(priv);
34457d993c5fSArseny Solokha 
34467d993c5fSArseny Solokha 	/* Print out the device info */
34477d993c5fSArseny Solokha 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
34487d993c5fSArseny Solokha 
34497d993c5fSArseny Solokha 	/* Even more device info helps when determining which kernel
34507d993c5fSArseny Solokha 	 * provided which set of benchmarks.
34517d993c5fSArseny Solokha 	 */
34527d993c5fSArseny Solokha 	netdev_info(dev, "Running with NAPI enabled\n");
34537d993c5fSArseny Solokha 	for (i = 0; i < priv->num_rx_queues; i++)
34547d993c5fSArseny Solokha 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
34557d993c5fSArseny Solokha 			    i, priv->rx_queue[i]->rx_ring_size);
34567d993c5fSArseny Solokha 	for (i = 0; i < priv->num_tx_queues; i++)
34577d993c5fSArseny Solokha 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
34587d993c5fSArseny Solokha 			    i, priv->tx_queue[i]->tx_ring_size);
34597d993c5fSArseny Solokha 
34607d993c5fSArseny Solokha 	return 0;
34617d993c5fSArseny Solokha 
34627d993c5fSArseny Solokha register_fail:
34637d993c5fSArseny Solokha 	if (of_phy_is_fixed_link(np))
34647d993c5fSArseny Solokha 		of_phy_deregister_fixed_link(np);
34657d993c5fSArseny Solokha 	unmap_group_regs(priv);
34667d993c5fSArseny Solokha 	gfar_free_rx_queues(priv);
34677d993c5fSArseny Solokha 	gfar_free_tx_queues(priv);
34687d993c5fSArseny Solokha 	of_node_put(priv->phy_node);
34697d993c5fSArseny Solokha 	of_node_put(priv->tbi_node);
34707d993c5fSArseny Solokha 	free_gfar_dev(priv);
34717d993c5fSArseny Solokha 	return err;
34727d993c5fSArseny Solokha }
34737d993c5fSArseny Solokha 
34747d993c5fSArseny Solokha static int gfar_remove(struct platform_device *ofdev)
34757d993c5fSArseny Solokha {
34767d993c5fSArseny Solokha 	struct gfar_private *priv = platform_get_drvdata(ofdev);
34777d993c5fSArseny Solokha 	struct device_node *np = ofdev->dev.of_node;
34787d993c5fSArseny Solokha 
34797d993c5fSArseny Solokha 	of_node_put(priv->phy_node);
34807d993c5fSArseny Solokha 	of_node_put(priv->tbi_node);
34817d993c5fSArseny Solokha 
34827d993c5fSArseny Solokha 	unregister_netdev(priv->ndev);
34837d993c5fSArseny Solokha 
34847d993c5fSArseny Solokha 	if (of_phy_is_fixed_link(np))
34857d993c5fSArseny Solokha 		of_phy_deregister_fixed_link(np);
34867d993c5fSArseny Solokha 
34877d993c5fSArseny Solokha 	unmap_group_regs(priv);
34887d993c5fSArseny Solokha 	gfar_free_rx_queues(priv);
34897d993c5fSArseny Solokha 	gfar_free_tx_queues(priv);
34907d993c5fSArseny Solokha 	free_gfar_dev(priv);
34917d993c5fSArseny Solokha 
34927d993c5fSArseny Solokha 	return 0;
34937d993c5fSArseny Solokha }
34947d993c5fSArseny Solokha 
34957d993c5fSArseny Solokha #ifdef CONFIG_PM
34967d993c5fSArseny Solokha 
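/* The two helpers below turn the Rx filer and frame parser off and back on;
 * callers use them to bracket rewrites of the filer rule table around
 * suspend/resume.
 */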
34977d993c5fSArseny Solokha static void __gfar_filer_disable(struct gfar_private *priv)
34987d993c5fSArseny Solokha {
34997d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
35007d993c5fSArseny Solokha 	u32 temp;
35017d993c5fSArseny Solokha 
35027d993c5fSArseny Solokha 	temp = gfar_read(&regs->rctrl);
35037d993c5fSArseny Solokha 	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
35047d993c5fSArseny Solokha 	gfar_write(&regs->rctrl, temp);
35057d993c5fSArseny Solokha }
35067d993c5fSArseny Solokha 
35077d993c5fSArseny Solokha static void __gfar_filer_enable(struct gfar_private *priv)
35087d993c5fSArseny Solokha {
35097d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
35107d993c5fSArseny Solokha 	u32 temp;
35117d993c5fSArseny Solokha 
35127d993c5fSArseny Solokha 	temp = gfar_read(&regs->rctrl);
35137d993c5fSArseny Solokha 	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
35147d993c5fSArseny Solokha 	gfar_write(&regs->rctrl, temp);
35157d993c5fSArseny Solokha }
35167d993c5fSArseny Solokha 
35177d993c5fSArseny Solokha /* Filer rules implementing WoL capabilities */
35187d993c5fSArseny Solokha static void gfar_filer_config_wol(struct gfar_private *priv)
35197d993c5fSArseny Solokha {
35207d993c5fSArseny Solokha 	unsigned int i;
35217d993c5fSArseny Solokha 	u32 rqfcr;
35227d993c5fSArseny Solokha 
35237d993c5fSArseny Solokha 	__gfar_filer_disable(priv);
35247d993c5fSArseny Solokha 
35257d993c5fSArseny Solokha 	/* clear the filer table, reject any packet by default */
35267d993c5fSArseny Solokha 	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
35277d993c5fSArseny Solokha 	for (i = 0; i <= MAX_FILER_IDX; i++)
35287d993c5fSArseny Solokha 		gfar_write_filer(priv, i, rqfcr, 0);
35297d993c5fSArseny Solokha 
35307d993c5fSArseny Solokha 	i = 0;
35317d993c5fSArseny Solokha 	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
35327d993c5fSArseny Solokha 		/* unicast packet, accept it */
35337d993c5fSArseny Solokha 		struct net_device *ndev = priv->ndev;
35347d993c5fSArseny Solokha 		/* get the default rx queue index */
35357d993c5fSArseny Solokha 		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
35367d993c5fSArseny Solokha 		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
35377d993c5fSArseny Solokha 				    (ndev->dev_addr[1] << 8) |
35387d993c5fSArseny Solokha 				     ndev->dev_addr[2];
35397d993c5fSArseny Solokha 
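		/* The 6-byte station address is matched by two chained rules: the
		 * upper 3 bytes against the DAH property and, via RQFCR_AND, the
		 * lower 3 bytes against DAL.  Only a full match raises the filer
		 * general purpose interrupt (RQFCR_GPI) used as the wake event.
		 */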
35407d993c5fSArseny Solokha 		rqfcr = (qindex << 10) | RQFCR_AND |
35417d993c5fSArseny Solokha 			RQFCR_CMP_EXACT | RQFCR_PID_DAH;
35427d993c5fSArseny Solokha 
35437d993c5fSArseny Solokha 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
35447d993c5fSArseny Solokha 
35457d993c5fSArseny Solokha 		dest_mac_addr = (ndev->dev_addr[3] << 16) |
35467d993c5fSArseny Solokha 				(ndev->dev_addr[4] << 8) |
35477d993c5fSArseny Solokha 				 ndev->dev_addr[5];
35487d993c5fSArseny Solokha 		rqfcr = (qindex << 10) | RQFCR_GPI |
35497d993c5fSArseny Solokha 			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
35507d993c5fSArseny Solokha 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
35517d993c5fSArseny Solokha 	}
35527d993c5fSArseny Solokha 
35537d993c5fSArseny Solokha 	__gfar_filer_enable(priv);
35547d993c5fSArseny Solokha }
35557d993c5fSArseny Solokha 
35567d993c5fSArseny Solokha static void gfar_filer_restore_table(struct gfar_private *priv)
35577d993c5fSArseny Solokha {
35587d993c5fSArseny Solokha 	u32 rqfcr, rqfpr;
35597d993c5fSArseny Solokha 	unsigned int i;
35607d993c5fSArseny Solokha 
35617d993c5fSArseny Solokha 	__gfar_filer_disable(priv);
35627d993c5fSArseny Solokha 
35637d993c5fSArseny Solokha 	for (i = 0; i <= MAX_FILER_IDX; i++) {
35647d993c5fSArseny Solokha 		rqfcr = priv->ftp_rqfcr[i];
35657d993c5fSArseny Solokha 		rqfpr = priv->ftp_rqfpr[i];
35667d993c5fSArseny Solokha 		gfar_write_filer(priv, i, rqfcr, rqfpr);
35677d993c5fSArseny Solokha 	}
35687d993c5fSArseny Solokha 
35697d993c5fSArseny Solokha 	__gfar_filer_enable(priv);
35707d993c5fSArseny Solokha }
35717d993c5fSArseny Solokha 
35727d993c5fSArseny Solokha /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
35737d993c5fSArseny Solokha static void gfar_start_wol_filer(struct gfar_private *priv)
35747d993c5fSArseny Solokha {
35757d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
35767d993c5fSArseny Solokha 	u32 tempval;
35777d993c5fSArseny Solokha 	int i = 0;
35787d993c5fSArseny Solokha 
35797d993c5fSArseny Solokha 	/* Enable Rx hw queues */
35807d993c5fSArseny Solokha 	gfar_write(&regs->rqueue, priv->rqueue);
35817d993c5fSArseny Solokha 
35827d993c5fSArseny Solokha 	/* Initialize DMACTRL to have WWR and WOP */
35837d993c5fSArseny Solokha 	tempval = gfar_read(&regs->dmactrl);
35847d993c5fSArseny Solokha 	tempval |= DMACTRL_INIT_SETTINGS;
35857d993c5fSArseny Solokha 	gfar_write(&regs->dmactrl, tempval);
35867d993c5fSArseny Solokha 
35877d993c5fSArseny Solokha 	/* Make sure we aren't stopped */
35887d993c5fSArseny Solokha 	tempval = gfar_read(&regs->dmactrl);
35897d993c5fSArseny Solokha 	tempval &= ~DMACTRL_GRS;
35907d993c5fSArseny Solokha 	gfar_write(&regs->dmactrl, tempval);
35917d993c5fSArseny Solokha 
35927d993c5fSArseny Solokha 	for (i = 0; i < priv->num_grps; i++) {
35937d993c5fSArseny Solokha 		regs = priv->gfargrp[i].regs;
35947d993c5fSArseny Solokha 		/* Clear RHLT, so that the DMA starts polling now */
35957d993c5fSArseny Solokha 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
35967d993c5fSArseny Solokha 		/* enable the Filer General Purpose Interrupt */
35977d993c5fSArseny Solokha 		gfar_write(&regs->imask, IMASK_FGPI);
35987d993c5fSArseny Solokha 	}
35997d993c5fSArseny Solokha 
36007d993c5fSArseny Solokha 	/* Enable Rx DMA */
36017d993c5fSArseny Solokha 	tempval = gfar_read(&regs->maccfg1);
36027d993c5fSArseny Solokha 	tempval |= MACCFG1_RX_EN;
36037d993c5fSArseny Solokha 	gfar_write(&regs->maccfg1, tempval);
36047d993c5fSArseny Solokha }
36057d993c5fSArseny Solokha 
36067d993c5fSArseny Solokha static int gfar_suspend(struct device *dev)
36077d993c5fSArseny Solokha {
36087d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
36097d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
36107d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
36117d993c5fSArseny Solokha 	u32 tempval;
36127d993c5fSArseny Solokha 	u16 wol = priv->wol_opts;
36137d993c5fSArseny Solokha 
36147d993c5fSArseny Solokha 	if (!netif_running(ndev))
36157d993c5fSArseny Solokha 		return 0;
36167d993c5fSArseny Solokha 
36177d993c5fSArseny Solokha 	disable_napi(priv);
36187d993c5fSArseny Solokha 	netif_tx_lock(ndev);
36197d993c5fSArseny Solokha 	netif_device_detach(ndev);
36207d993c5fSArseny Solokha 	netif_tx_unlock(ndev);
36217d993c5fSArseny Solokha 
36227d993c5fSArseny Solokha 	gfar_halt(priv);
36237d993c5fSArseny Solokha 
36247d993c5fSArseny Solokha 	if (wol & GFAR_WOL_MAGIC) {
36257d993c5fSArseny Solokha 		/* Enable interrupt on Magic Packet */
36267d993c5fSArseny Solokha 		gfar_write(&regs->imask, IMASK_MAG);
36277d993c5fSArseny Solokha 
36287d993c5fSArseny Solokha 		/* Enable Magic Packet mode */
36297d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg2);
36307d993c5fSArseny Solokha 		tempval |= MACCFG2_MPEN;
36317d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
36327d993c5fSArseny Solokha 
36337d993c5fSArseny Solokha 		/* re-enable the Rx block */
36347d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg1);
36357d993c5fSArseny Solokha 		tempval |= MACCFG1_RX_EN;
36367d993c5fSArseny Solokha 		gfar_write(&regs->maccfg1, tempval);
36377d993c5fSArseny Solokha 
36387d993c5fSArseny Solokha 	} else if (wol & GFAR_WOL_FILER_UCAST) {
36397d993c5fSArseny Solokha 		gfar_filer_config_wol(priv);
36407d993c5fSArseny Solokha 		gfar_start_wol_filer(priv);
36417d993c5fSArseny Solokha 
36427d993c5fSArseny Solokha 	} else {
36437d993c5fSArseny Solokha 		phy_stop(ndev->phydev);
36447d993c5fSArseny Solokha 	}
36457d993c5fSArseny Solokha 
36467d993c5fSArseny Solokha 	return 0;
36477d993c5fSArseny Solokha }
36487d993c5fSArseny Solokha 
36497d993c5fSArseny Solokha static int gfar_resume(struct device *dev)
36507d993c5fSArseny Solokha {
36517d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
36527d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
36537d993c5fSArseny Solokha 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
36547d993c5fSArseny Solokha 	u32 tempval;
36557d993c5fSArseny Solokha 	u16 wol = priv->wol_opts;
36567d993c5fSArseny Solokha 
36577d993c5fSArseny Solokha 	if (!netif_running(ndev))
36587d993c5fSArseny Solokha 		return 0;
36597d993c5fSArseny Solokha 
36607d993c5fSArseny Solokha 	if (wol & GFAR_WOL_MAGIC) {
36617d993c5fSArseny Solokha 		/* Disable Magic Packet mode */
36627d993c5fSArseny Solokha 		tempval = gfar_read(&regs->maccfg2);
36637d993c5fSArseny Solokha 		tempval &= ~MACCFG2_MPEN;
36647d993c5fSArseny Solokha 		gfar_write(&regs->maccfg2, tempval);
36657d993c5fSArseny Solokha 
36667d993c5fSArseny Solokha 	} else if (wol & GFAR_WOL_FILER_UCAST) {
36677d993c5fSArseny Solokha 		/* need to stop rx only, tx is already down */
36687d993c5fSArseny Solokha 		gfar_halt(priv);
36697d993c5fSArseny Solokha 		gfar_filer_restore_table(priv);
36707d993c5fSArseny Solokha 
36717d993c5fSArseny Solokha 	} else {
36727d993c5fSArseny Solokha 		phy_start(ndev->phydev);
36737d993c5fSArseny Solokha 	}
36747d993c5fSArseny Solokha 
36757d993c5fSArseny Solokha 	gfar_start(priv);
36767d993c5fSArseny Solokha 
36777d993c5fSArseny Solokha 	netif_device_attach(ndev);
36787d993c5fSArseny Solokha 	enable_napi(priv);
36797d993c5fSArseny Solokha 
36807d993c5fSArseny Solokha 	return 0;
36817d993c5fSArseny Solokha }
36827d993c5fSArseny Solokha 
36837d993c5fSArseny Solokha static int gfar_restore(struct device *dev)
36847d993c5fSArseny Solokha {
36857d993c5fSArseny Solokha 	struct gfar_private *priv = dev_get_drvdata(dev);
36867d993c5fSArseny Solokha 	struct net_device *ndev = priv->ndev;
36877d993c5fSArseny Solokha 
36887d993c5fSArseny Solokha 	if (!netif_running(ndev)) {
36897d993c5fSArseny Solokha 		netif_device_attach(ndev);
36907d993c5fSArseny Solokha 
36917d993c5fSArseny Solokha 		return 0;
36927d993c5fSArseny Solokha 	}
36937d993c5fSArseny Solokha 
36947d993c5fSArseny Solokha 	gfar_init_bds(ndev);
36957d993c5fSArseny Solokha 
36967d993c5fSArseny Solokha 	gfar_mac_reset(priv);
36977d993c5fSArseny Solokha 
36987d993c5fSArseny Solokha 	gfar_init_tx_rx_base(priv);
36997d993c5fSArseny Solokha 
37007d993c5fSArseny Solokha 	gfar_start(priv);
37017d993c5fSArseny Solokha 
37026ce29b0eSClaudiu Manoil 	priv->oldlink = 0;
37036ce29b0eSClaudiu Manoil 	priv->oldspeed = 0;
37046ce29b0eSClaudiu Manoil 	priv->oldduplex = -1;
37057d993c5fSArseny Solokha 
37067d993c5fSArseny Solokha 	if (ndev->phydev)
37077d993c5fSArseny Solokha 		phy_start(ndev->phydev);
37087d993c5fSArseny Solokha 
37097d993c5fSArseny Solokha 	netif_device_attach(ndev);
37107d993c5fSArseny Solokha 	enable_napi(priv);
37117d993c5fSArseny Solokha 
37127d993c5fSArseny Solokha 	return 0;
37136ce29b0eSClaudiu Manoil }
37146ce29b0eSClaudiu Manoil 
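/* .freeze/.thaw reuse the suspend/resume paths; .restore (after hibernation)
 * additionally rebuilds the BD rings and resets the MAC above, since the
 * hardware state cannot be assumed to have survived.
 */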
37157d993c5fSArseny Solokha static const struct dev_pm_ops gfar_pm_ops = {
37167d993c5fSArseny Solokha 	.suspend = gfar_suspend,
37177d993c5fSArseny Solokha 	.resume = gfar_resume,
37187d993c5fSArseny Solokha 	.freeze = gfar_suspend,
37197d993c5fSArseny Solokha 	.thaw = gfar_resume,
37207d993c5fSArseny Solokha 	.restore = gfar_restore,
37217d993c5fSArseny Solokha };
37227d993c5fSArseny Solokha 
37237d993c5fSArseny Solokha #define GFAR_PM_OPS (&gfar_pm_ops)
37247d993c5fSArseny Solokha 
37257d993c5fSArseny Solokha #else
37267d993c5fSArseny Solokha 
37277d993c5fSArseny Solokha #define GFAR_PM_OPS NULL
37287d993c5fSArseny Solokha 
37297d993c5fSArseny Solokha #endif
37306ce29b0eSClaudiu Manoil 
373194e5a2a8SFabian Frederick static const struct of_device_id gfar_match[] =
3732ec21e2ecSJeff Kirsher {
3733ec21e2ecSJeff Kirsher 	{
3734ec21e2ecSJeff Kirsher 		.type = "network",
3735ec21e2ecSJeff Kirsher 		.compatible = "gianfar",
3736ec21e2ecSJeff Kirsher 	},
3737ec21e2ecSJeff Kirsher 	{
3738ec21e2ecSJeff Kirsher 		.compatible = "fsl,etsec2",
3739ec21e2ecSJeff Kirsher 	},
3740ec21e2ecSJeff Kirsher 	{},
3741ec21e2ecSJeff Kirsher };
3742ec21e2ecSJeff Kirsher MODULE_DEVICE_TABLE(of, gfar_match);
3743ec21e2ecSJeff Kirsher 
3744ec21e2ecSJeff Kirsher /* Structure for a device driver */
3745ec21e2ecSJeff Kirsher static struct platform_driver gfar_driver = {
3746ec21e2ecSJeff Kirsher 	.driver = {
3747ec21e2ecSJeff Kirsher 		.name = "fsl-gianfar",
3748ec21e2ecSJeff Kirsher 		.pm = GFAR_PM_OPS,
3749ec21e2ecSJeff Kirsher 		.of_match_table = gfar_match,
3750ec21e2ecSJeff Kirsher 	},
3751ec21e2ecSJeff Kirsher 	.probe = gfar_probe,
3752ec21e2ecSJeff Kirsher 	.remove = gfar_remove,
3753ec21e2ecSJeff Kirsher };
3754ec21e2ecSJeff Kirsher 
3755db62f684SAxel Lin module_platform_driver(gfar_driver);
3756