1 /* drivers/net/ethernet/freescale/gianfar.c
2  *
3  * Gianfar Ethernet Driver
4  * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx families of integrated processors
6  * Based on 8260_io/fcc_enet.c
7  *
8  * Author: Andy Fleming
9  * Maintainer: Kumar Gala
10  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11  *
12  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13  * Copyright 2007 MontaVista Software, Inc.
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  *  Gianfar:  AKA Lambda Draconis, "Dragon"
21  *  RA 11 31 24.2
22  *  Dec +69 19 52
23  *  V 3.84
24  *  B-V +1.62
25  *
26  *  Theory of operation
27  *
28  *  The driver is initialized through of_device. Configuration information
29  *  is therefore conveyed through an OF-style device tree.
30  *
31  *  The Gianfar Ethernet Controller uses a ring of buffer
32  *  descriptors.  The beginning is indicated by a register
33  *  pointing to the physical address of the start of the ring.
34  *  The end is determined by a "wrap" bit being set in the
35  *  last descriptor of the ring.
36  *
37  *  When a packet is received, the RXF bit in the
38  *  IEVENT register is set, triggering an interrupt when the
39  *  corresponding bit in the IMASK register is also set (if
40  *  interrupt coalescing is active, then the interrupt may not
 *  of frames or amount of time has passed).  In NAPI, the
42  *  of frames or amount of time have passed).  In NAPI, the
43  *  interrupt handler will signal there is work to be done, and
44  *  exit. This method will start at the last known empty
45  *  descriptor, and process every subsequent descriptor until there
46  *  are none left with data (NAPI will stop after a set number of
47  *  packets to give time to other tasks, but will eventually
48  *  process all the packets).  The data arrives inside a
49  *  pre-allocated skb, and so after the skb is passed up to the
50  *  stack, a new skb must be allocated, and the address field in
51  *  the buffer descriptor must be updated to indicate this new
52  *  skb.
53  *
54  *  When the kernel requests that a packet be transmitted, the
55  *  driver starts where it left off last time, and points the
56  *  descriptor at the buffer which was passed in.  The driver
57  *  then informs the DMA engine that there are packets ready to
58  *  be transmitted.  Once the controller is finished transmitting
59  *  the packet, an interrupt may be triggered (under the same
60  *  conditions as for reception, but depending on the TXF bit).
61  *  The driver then cleans up the buffer.
62  */
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 #define DEBUG
66 
67 #include <linux/kernel.h>
68 #include <linux/string.h>
69 #include <linux/errno.h>
70 #include <linux/unistd.h>
71 #include <linux/slab.h>
72 #include <linux/interrupt.h>
73 #include <linux/delay.h>
74 #include <linux/netdevice.h>
75 #include <linux/etherdevice.h>
76 #include <linux/skbuff.h>
77 #include <linux/if_vlan.h>
78 #include <linux/spinlock.h>
79 #include <linux/mm.h>
80 #include <linux/of_address.h>
81 #include <linux/of_irq.h>
82 #include <linux/of_mdio.h>
83 #include <linux/of_platform.h>
84 #include <linux/ip.h>
85 #include <linux/tcp.h>
86 #include <linux/udp.h>
87 #include <linux/in.h>
88 #include <linux/net_tstamp.h>
89 
90 #include <asm/io.h>
91 #include <asm/reg.h>
92 #include <asm/mpc85xx.h>
93 #include <asm/irq.h>
94 #include <asm/uaccess.h>
95 #include <linux/module.h>
96 #include <linux/dma-mapping.h>
97 #include <linux/crc32.h>
98 #include <linux/mii.h>
99 #include <linux/phy.h>
100 #include <linux/phy_fixed.h>
101 #include <linux/of.h>
102 #include <linux/of_net.h>
103 
104 #include "gianfar.h"
105 
106 #define TX_TIMEOUT      (1*HZ)
107 
108 const char gfar_driver_version[] = "1.3";
109 
110 static int gfar_enet_open(struct net_device *dev);
111 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
112 static void gfar_reset_task(struct work_struct *work);
113 static void gfar_timeout(struct net_device *dev);
114 static int gfar_close(struct net_device *dev);
115 struct sk_buff *gfar_new_skb(struct net_device *dev);
116 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
117 			   struct sk_buff *skb);
118 static int gfar_set_mac_address(struct net_device *dev);
119 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
120 static irqreturn_t gfar_error(int irq, void *dev_id);
121 static irqreturn_t gfar_transmit(int irq, void *dev_id);
122 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
123 static void adjust_link(struct net_device *dev);
124 static int init_phy(struct net_device *dev);
125 static int gfar_probe(struct platform_device *ofdev);
126 static int gfar_remove(struct platform_device *ofdev);
127 static void free_skb_resources(struct gfar_private *priv);
128 static void gfar_set_multi(struct net_device *dev);
129 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
130 static void gfar_configure_serdes(struct net_device *dev);
131 static int gfar_poll_rx(struct napi_struct *napi, int budget);
132 static int gfar_poll_tx(struct napi_struct *napi, int budget);
133 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
134 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
135 #ifdef CONFIG_NET_POLL_CONTROLLER
136 static void gfar_netpoll(struct net_device *dev);
137 #endif
138 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
139 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
140 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
141 			       int amount_pull, struct napi_struct *napi);
142 static void gfar_halt_nodisable(struct gfar_private *priv);
143 static void gfar_clear_exact_match(struct net_device *dev);
144 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
145 				  const u8 *addr);
146 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
147 
148 MODULE_AUTHOR("Freescale Semiconductor, Inc");
149 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
150 MODULE_LICENSE("GPL");
151 
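
/* Initialize a single Rx buffer descriptor: set its buffer address,
 * mark it empty and interrupt-enabled, and set the WRAP flag on the
 * last descriptor of the ring.
 */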
152 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
153 			    dma_addr_t buf)
154 {
155 	u32 lstatus;
156 
157 	bdp->bufPtr = buf;
158 
159 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
160 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
161 		lstatus |= BD_LFLAG(RXBD_WRAP);
162 
163 	eieio();
164 
165 	bdp->lstatus = lstatus;
166 }
167 
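
/* (Re)initialize the Tx and Rx descriptor rings: reset the per-queue
 * ring state, clear all Tx descriptors, and make sure every Rx
 * descriptor points at an allocated skb (allocating new ones as needed).
 */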
168 static int gfar_init_bds(struct net_device *ndev)
169 {
170 	struct gfar_private *priv = netdev_priv(ndev);
171 	struct gfar_priv_tx_q *tx_queue = NULL;
172 	struct gfar_priv_rx_q *rx_queue = NULL;
173 	struct txbd8 *txbdp;
174 	struct rxbd8 *rxbdp;
175 	int i, j;
176 
177 	for (i = 0; i < priv->num_tx_queues; i++) {
178 		tx_queue = priv->tx_queue[i];
179 		/* Initialize some variables in our dev structure */
180 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
181 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
182 		tx_queue->cur_tx = tx_queue->tx_bd_base;
183 		tx_queue->skb_curtx = 0;
184 		tx_queue->skb_dirtytx = 0;
185 
186 		/* Initialize Transmit Descriptor Ring */
187 		txbdp = tx_queue->tx_bd_base;
188 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
189 			txbdp->lstatus = 0;
190 			txbdp->bufPtr = 0;
191 			txbdp++;
192 		}
193 
194 		/* Set the last descriptor in the ring to indicate wrap */
195 		txbdp--;
196 		txbdp->status |= TXBD_WRAP;
197 	}
198 
199 	for (i = 0; i < priv->num_rx_queues; i++) {
200 		rx_queue = priv->rx_queue[i];
201 		rx_queue->cur_rx = rx_queue->rx_bd_base;
202 		rx_queue->skb_currx = 0;
203 		rxbdp = rx_queue->rx_bd_base;
204 
205 		for (j = 0; j < rx_queue->rx_ring_size; j++) {
206 			struct sk_buff *skb = rx_queue->rx_skbuff[j];
207 
208 			if (skb) {
209 				gfar_init_rxbdp(rx_queue, rxbdp,
210 						rxbdp->bufPtr);
211 			} else {
212 				skb = gfar_new_skb(ndev);
213 				if (!skb) {
214 					netdev_err(ndev, "Can't allocate RX buffers\n");
215 					return -ENOMEM;
216 				}
217 				rx_queue->rx_skbuff[j] = skb;
218 
219 				gfar_new_rxbdp(rx_queue, rxbdp, skb);
220 			}
221 
222 			rxbdp++;
223 		}
224 
225 	}
226 
227 	return 0;
228 }
229 
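
/* Allocate one DMA-coherent region for all Tx and Rx descriptor rings,
 * carve it up between the queues, allocate the per-queue skb pointer
 * arrays, and initialize the descriptors.
 */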
230 static int gfar_alloc_skb_resources(struct net_device *ndev)
231 {
232 	void *vaddr;
233 	dma_addr_t addr;
234 	int i, j, k;
235 	struct gfar_private *priv = netdev_priv(ndev);
236 	struct device *dev = priv->dev;
237 	struct gfar_priv_tx_q *tx_queue = NULL;
238 	struct gfar_priv_rx_q *rx_queue = NULL;
239 
240 	priv->total_tx_ring_size = 0;
241 	for (i = 0; i < priv->num_tx_queues; i++)
242 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
243 
244 	priv->total_rx_ring_size = 0;
245 	for (i = 0; i < priv->num_rx_queues; i++)
246 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
247 
248 	/* Allocate memory for the buffer descriptors */
249 	vaddr = dma_alloc_coherent(dev,
250 				   (priv->total_tx_ring_size *
251 				    sizeof(struct txbd8)) +
252 				   (priv->total_rx_ring_size *
253 				    sizeof(struct rxbd8)),
254 				   &addr, GFP_KERNEL);
255 	if (!vaddr)
256 		return -ENOMEM;
257 
258 	for (i = 0; i < priv->num_tx_queues; i++) {
259 		tx_queue = priv->tx_queue[i];
260 		tx_queue->tx_bd_base = vaddr;
261 		tx_queue->tx_bd_dma_base = addr;
262 		tx_queue->dev = ndev;
263 		/* enet DMA only understands physical addresses */
264 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
265 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
266 	}
267 
268 	/* Start the rx descriptor ring where the tx ring leaves off */
269 	for (i = 0; i < priv->num_rx_queues; i++) {
270 		rx_queue = priv->rx_queue[i];
271 		rx_queue->rx_bd_base = vaddr;
272 		rx_queue->rx_bd_dma_base = addr;
273 		rx_queue->dev = ndev;
274 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
275 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
276 	}
277 
278 	/* Setup the skbuff rings */
279 	for (i = 0; i < priv->num_tx_queues; i++) {
280 		tx_queue = priv->tx_queue[i];
281 		tx_queue->tx_skbuff =
282 			kmalloc_array(tx_queue->tx_ring_size,
283 				      sizeof(*tx_queue->tx_skbuff),
284 				      GFP_KERNEL);
285 		if (!tx_queue->tx_skbuff)
286 			goto cleanup;
287 
288 		for (k = 0; k < tx_queue->tx_ring_size; k++)
289 			tx_queue->tx_skbuff[k] = NULL;
290 	}
291 
292 	for (i = 0; i < priv->num_rx_queues; i++) {
293 		rx_queue = priv->rx_queue[i];
294 		rx_queue->rx_skbuff =
295 			kmalloc_array(rx_queue->rx_ring_size,
296 				      sizeof(*rx_queue->rx_skbuff),
297 				      GFP_KERNEL);
298 		if (!rx_queue->rx_skbuff)
299 			goto cleanup;
300 
301 		for (j = 0; j < rx_queue->rx_ring_size; j++)
302 			rx_queue->rx_skbuff[j] = NULL;
303 	}
304 
305 	if (gfar_init_bds(ndev))
306 		goto cleanup;
307 
308 	return 0;
309 
310 cleanup:
311 	free_skb_resources(priv);
312 	return -ENOMEM;
313 }
314 
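
/* Program the DMA base addresses of the Tx and Rx descriptor rings
 * into the TBASEx/RBASEx registers.
 */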
315 static void gfar_init_tx_rx_base(struct gfar_private *priv)
316 {
317 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
318 	u32 __iomem *baddr;
319 	int i;
320 
321 	baddr = &regs->tbase0;
322 	for (i = 0; i < priv->num_tx_queues; i++) {
323 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
324 		baddr += 2;
325 	}
326 
327 	baddr = &regs->rbase0;
328 	for (i = 0; i < priv->num_rx_queues; i++) {
329 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
330 		baddr += 2;
331 	}
332 }
333 
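
/* Compute the Rx buffer size from the MTU, adding room for the frame
 * control block (FCB) when Rx hw offloads or timestamping are in use,
 * plus any configured padding, rounded up to the next
 * INCREMENTAL_BUFFER_SIZE boundary.
 */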
334 static void gfar_rx_buff_size_config(struct gfar_private *priv)
335 {
336 	int frame_size = priv->ndev->mtu + ETH_HLEN;
337 
338 	/* set this when rx hw offload (TOE) functions are being used */
339 	priv->uses_rxfcb = 0;
340 
341 	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
342 		priv->uses_rxfcb = 1;
343 
344 	if (priv->hwts_rx_en)
345 		priv->uses_rxfcb = 1;
346 
347 	if (priv->uses_rxfcb)
348 		frame_size += GMAC_FCB_LEN;
349 
350 	frame_size += priv->padding;
351 
352 	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
353 		     INCREMENTAL_BUFFER_SIZE;
354 
355 	priv->rx_buffer_size = frame_size;
356 }
357 
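
/* Build and write RCTRL: Rx filer, promiscuous mode, checksum offload,
 * extended hashing, padding, timestamping and VLAN extraction, based
 * on the current device settings.
 */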
358 static void gfar_mac_rx_config(struct gfar_private *priv)
359 {
360 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
361 	u32 rctrl = 0;
362 
363 	if (priv->rx_filer_enable) {
364 		rctrl |= RCTRL_FILREN;
365 		/* Program the RIR0 reg with the required distribution */
366 		if (priv->poll_mode == GFAR_SQ_POLLING)
367 			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
368 		else /* GFAR_MQ_POLLING */
369 			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
370 	}
371 
372 	/* Restore PROMISC mode */
373 	if (priv->ndev->flags & IFF_PROMISC)
374 		rctrl |= RCTRL_PROM;
375 
376 	if (priv->ndev->features & NETIF_F_RXCSUM)
377 		rctrl |= RCTRL_CHECKSUMMING;
378 
379 	if (priv->extended_hash)
380 		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
381 
382 	if (priv->padding) {
383 		rctrl &= ~RCTRL_PAL_MASK;
384 		rctrl |= RCTRL_PADDING(priv->padding);
385 	}
386 
387 	/* Enable HW time stamping if requested from user space */
388 	if (priv->hwts_rx_en)
389 		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
390 
391 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
392 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
393 
394 	/* Init rctrl based on our settings */
395 	gfar_write(&regs->rctrl, rctrl);
396 }
397 
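
/* Build and write TCTRL: Tx checksum offload, priority or weighted
 * round-robin queue scheduling, and VLAN insertion.
 */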
398 static void gfar_mac_tx_config(struct gfar_private *priv)
399 {
400 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
401 	u32 tctrl = 0;
402 
403 	if (priv->ndev->features & NETIF_F_IP_CSUM)
404 		tctrl |= TCTRL_INIT_CSUM;
405 
	if (priv->prio_sched_en) {
		tctrl |= TCTRL_TXSCHED_PRIO;
	} else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}
413 
414 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
415 		tctrl |= TCTRL_VLINS;
416 
417 	gfar_write(&regs->tctrl, tctrl);
418 }
419 
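
/* Program the interrupt coalescing registers for the Tx/Rx queues
 * selected by tx_mask/rx_mask (per-queue registers in multi-group
 * mode, a single register pair otherwise).
 */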
420 static void gfar_configure_coalescing(struct gfar_private *priv,
421 			       unsigned long tx_mask, unsigned long rx_mask)
422 {
423 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
424 	u32 __iomem *baddr;
425 
426 	if (priv->mode == MQ_MG_MODE) {
427 		int i = 0;
428 
429 		baddr = &regs->txic0;
430 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
431 			gfar_write(baddr + i, 0);
432 			if (likely(priv->tx_queue[i]->txcoalescing))
433 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
434 		}
435 
436 		baddr = &regs->rxic0;
437 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
438 			gfar_write(baddr + i, 0);
439 			if (likely(priv->rx_queue[i]->rxcoalescing))
440 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
441 		}
442 	} else {
443 		/* Backward compatible case -- even if we enable
	 * multiple queues, there's only a single reg to program
445 		 */
446 		gfar_write(&regs->txic, 0);
447 		if (likely(priv->tx_queue[0]->txcoalescing))
448 			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
449 
450 		gfar_write(&regs->rxic, 0);
451 		if (unlikely(priv->rx_queue[0]->rxcoalescing))
452 			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
453 	}
454 }
455 
456 void gfar_configure_coalescing_all(struct gfar_private *priv)
457 {
458 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
459 }
460 
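
/* Sum the per-queue software counters into the netdev stats structure */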
461 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
462 {
463 	struct gfar_private *priv = netdev_priv(dev);
464 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
465 	unsigned long tx_packets = 0, tx_bytes = 0;
466 	int i;
467 
468 	for (i = 0; i < priv->num_rx_queues; i++) {
469 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
470 		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
471 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
472 	}
473 
474 	dev->stats.rx_packets = rx_packets;
475 	dev->stats.rx_bytes   = rx_bytes;
476 	dev->stats.rx_dropped = rx_dropped;
477 
478 	for (i = 0; i < priv->num_tx_queues; i++) {
479 		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
480 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
481 	}
482 
483 	dev->stats.tx_bytes   = tx_bytes;
484 	dev->stats.tx_packets = tx_packets;
485 
486 	return &dev->stats;
487 }
488 
489 static const struct net_device_ops gfar_netdev_ops = {
490 	.ndo_open = gfar_enet_open,
491 	.ndo_start_xmit = gfar_start_xmit,
492 	.ndo_stop = gfar_close,
493 	.ndo_change_mtu = gfar_change_mtu,
494 	.ndo_set_features = gfar_set_features,
495 	.ndo_set_rx_mode = gfar_set_multi,
496 	.ndo_tx_timeout = gfar_timeout,
497 	.ndo_do_ioctl = gfar_ioctl,
498 	.ndo_get_stats = gfar_get_stats,
499 	.ndo_set_mac_address = eth_mac_addr,
500 	.ndo_validate_addr = eth_validate_addr,
501 #ifdef CONFIG_NET_POLL_CONTROLLER
502 	.ndo_poll_controller = gfar_netpoll,
503 #endif
504 };
505 
506 static void gfar_ints_disable(struct gfar_private *priv)
507 {
508 	int i;
509 	for (i = 0; i < priv->num_grps; i++) {
510 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
511 		/* Clear IEVENT */
512 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
513 
514 		/* Initialize IMASK */
515 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
516 	}
517 }
518 
519 static void gfar_ints_enable(struct gfar_private *priv)
520 {
521 	int i;
522 	for (i = 0; i < priv->num_grps; i++) {
523 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
524 		/* Unmask the interrupts we look for */
525 		gfar_write(&regs->imask, IMASK_DEFAULT);
526 	}
527 }
528 
529 void lock_tx_qs(struct gfar_private *priv)
530 {
531 	int i;
532 
533 	for (i = 0; i < priv->num_tx_queues; i++)
534 		spin_lock(&priv->tx_queue[i]->txlock);
535 }
536 
537 void unlock_tx_qs(struct gfar_private *priv)
538 {
539 	int i;
540 
541 	for (i = 0; i < priv->num_tx_queues; i++)
542 		spin_unlock(&priv->tx_queue[i]->txlock);
543 }
544 
545 static int gfar_alloc_tx_queues(struct gfar_private *priv)
546 {
547 	int i;
548 
549 	for (i = 0; i < priv->num_tx_queues; i++) {
550 		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
551 					    GFP_KERNEL);
552 		if (!priv->tx_queue[i])
553 			return -ENOMEM;
554 
555 		priv->tx_queue[i]->tx_skbuff = NULL;
556 		priv->tx_queue[i]->qindex = i;
557 		priv->tx_queue[i]->dev = priv->ndev;
558 		spin_lock_init(&(priv->tx_queue[i]->txlock));
559 	}
560 	return 0;
561 }
562 
563 static int gfar_alloc_rx_queues(struct gfar_private *priv)
564 {
565 	int i;
566 
567 	for (i = 0; i < priv->num_rx_queues; i++) {
568 		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
569 					    GFP_KERNEL);
570 		if (!priv->rx_queue[i])
571 			return -ENOMEM;
572 
573 		priv->rx_queue[i]->rx_skbuff = NULL;
574 		priv->rx_queue[i]->qindex = i;
575 		priv->rx_queue[i]->dev = priv->ndev;
576 	}
577 	return 0;
578 }
579 
580 static void gfar_free_tx_queues(struct gfar_private *priv)
581 {
582 	int i;
583 
584 	for (i = 0; i < priv->num_tx_queues; i++)
585 		kfree(priv->tx_queue[i]);
586 }
587 
588 static void gfar_free_rx_queues(struct gfar_private *priv)
589 {
590 	int i;
591 
592 	for (i = 0; i < priv->num_rx_queues; i++)
593 		kfree(priv->rx_queue[i]);
594 }
595 
596 static void unmap_group_regs(struct gfar_private *priv)
597 {
598 	int i;
599 
600 	for (i = 0; i < MAXGROUPS; i++)
601 		if (priv->gfargrp[i].regs)
602 			iounmap(priv->gfargrp[i].regs);
603 }
604 
605 static void free_gfar_dev(struct gfar_private *priv)
606 {
607 	int i, j;
608 
609 	for (i = 0; i < priv->num_grps; i++)
610 		for (j = 0; j < GFAR_NUM_IRQS; j++) {
611 			kfree(priv->gfargrp[i].irqinfo[j]);
612 			priv->gfargrp[i].irqinfo[j] = NULL;
613 		}
614 
615 	free_netdev(priv->ndev);
616 }
617 
618 static void disable_napi(struct gfar_private *priv)
619 {
620 	int i;
621 
622 	for (i = 0; i < priv->num_grps; i++) {
623 		napi_disable(&priv->gfargrp[i].napi_rx);
624 		napi_disable(&priv->gfargrp[i].napi_tx);
625 	}
626 }
627 
628 static void enable_napi(struct gfar_private *priv)
629 {
630 	int i;
631 
632 	for (i = 0; i < priv->num_grps; i++) {
633 		napi_enable(&priv->gfargrp[i].napi_rx);
634 		napi_enable(&priv->gfargrp[i].napi_tx);
635 	}
636 }
637 
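
/* Parse one interrupt group node from the device tree: map its register
 * block, look up its interrupts, and work out which Rx/Tx queues belong
 * to the group.
 */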
638 static int gfar_parse_group(struct device_node *np,
639 			    struct gfar_private *priv, const char *model)
640 {
641 	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
642 	int i;
643 
644 	for (i = 0; i < GFAR_NUM_IRQS; i++) {
645 		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
646 					  GFP_KERNEL);
647 		if (!grp->irqinfo[i])
648 			return -ENOMEM;
649 	}
650 
651 	grp->regs = of_iomap(np, 0);
652 	if (!grp->regs)
653 		return -ENOMEM;
654 
655 	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
656 
657 	/* If we aren't the FEC we have multiple interrupts */
658 	if (model && strcasecmp(model, "FEC")) {
659 		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
660 		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
661 		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
662 		    gfar_irq(grp, RX)->irq == NO_IRQ ||
663 		    gfar_irq(grp, ER)->irq == NO_IRQ)
664 			return -EINVAL;
665 	}
666 
667 	grp->priv = priv;
668 	spin_lock_init(&grp->grplock);
669 	if (priv->mode == MQ_MG_MODE) {
670 		u32 *rxq_mask, *txq_mask;
671 		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
672 		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
673 
674 		if (priv->poll_mode == GFAR_SQ_POLLING) {
675 			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
676 			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
677 			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
678 		} else { /* GFAR_MQ_POLLING */
679 			grp->rx_bit_map = rxq_mask ?
680 			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
681 			grp->tx_bit_map = txq_mask ?
682 			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
683 		}
684 	} else {
685 		grp->rx_bit_map = 0xFF;
686 		grp->tx_bit_map = 0xFF;
687 	}
688 
	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
	 * from right to left, so we need to reverse the 8 bits to get the
	 * q index
691 	 */
692 	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
693 	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
694 
695 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
696 	 * also assign queues to groups
697 	 */
698 	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
699 		if (!grp->rx_queue)
700 			grp->rx_queue = priv->rx_queue[i];
701 		grp->num_rx_queues++;
702 		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
703 		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
704 		priv->rx_queue[i]->grp = grp;
705 	}
706 
707 	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
708 		if (!grp->tx_queue)
709 			grp->tx_queue = priv->tx_queue[i];
710 		grp->num_tx_queues++;
711 		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
712 		priv->tqueue |= (TQUEUE_EN0 >> i);
713 		priv->tx_queue[i]->grp = grp;
714 	}
715 
716 	priv->num_grps++;
717 
718 	return 0;
719 }
720 
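
/* Read the device tree configuration (queue/group counts, stashing,
 * MAC address, model-specific capabilities, PHY handles) and allocate
 * the net_device and the per-queue structures.
 */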
721 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
722 {
723 	const char *model;
724 	const char *ctype;
725 	const void *mac_addr;
726 	int err = 0, i;
727 	struct net_device *dev = NULL;
728 	struct gfar_private *priv = NULL;
729 	struct device_node *np = ofdev->dev.of_node;
730 	struct device_node *child = NULL;
731 	const u32 *stash;
732 	const u32 *stash_len;
733 	const u32 *stash_idx;
734 	unsigned int num_tx_qs, num_rx_qs;
735 	u32 *tx_queues, *rx_queues;
736 	unsigned short mode, poll_mode;
737 
738 	if (!np || !of_device_is_available(np))
739 		return -ENODEV;
740 
741 	if (of_device_is_compatible(np, "fsl,etsec2")) {
742 		mode = MQ_MG_MODE;
743 		poll_mode = GFAR_SQ_POLLING;
744 	} else {
745 		mode = SQ_SG_MODE;
746 		poll_mode = GFAR_SQ_POLLING;
747 	}
748 
749 	/* parse the num of HW tx and rx queues */
750 	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
751 	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
752 
753 	if (mode == SQ_SG_MODE) {
754 		num_tx_qs = 1;
755 		num_rx_qs = 1;
756 	} else { /* MQ_MG_MODE */
757 		/* get the actual number of supported groups */
758 		unsigned int num_grps = of_get_available_child_count(np);
759 
760 		if (num_grps == 0 || num_grps > MAXGROUPS) {
761 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
762 				num_grps);
763 			pr_err("Cannot do alloc_etherdev, aborting\n");
764 			return -EINVAL;
765 		}
766 
767 		if (poll_mode == GFAR_SQ_POLLING) {
768 			num_tx_qs = num_grps; /* one txq per int group */
769 			num_rx_qs = num_grps; /* one rxq per int group */
770 		} else { /* GFAR_MQ_POLLING */
771 			num_tx_qs = tx_queues ? *tx_queues : 1;
772 			num_rx_qs = rx_queues ? *rx_queues : 1;
773 		}
774 	}
775 
776 	if (num_tx_qs > MAX_TX_QS) {
777 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
778 		       num_tx_qs, MAX_TX_QS);
779 		pr_err("Cannot do alloc_etherdev, aborting\n");
780 		return -EINVAL;
781 	}
782 
783 	if (num_rx_qs > MAX_RX_QS) {
784 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
785 		       num_rx_qs, MAX_RX_QS);
786 		pr_err("Cannot do alloc_etherdev, aborting\n");
787 		return -EINVAL;
788 	}
789 
790 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
791 	dev = *pdev;
792 	if (NULL == dev)
793 		return -ENOMEM;
794 
795 	priv = netdev_priv(dev);
796 	priv->ndev = dev;
797 
798 	priv->mode = mode;
799 	priv->poll_mode = poll_mode;
800 
801 	priv->num_tx_queues = num_tx_qs;
802 	netif_set_real_num_rx_queues(dev, num_rx_qs);
803 	priv->num_rx_queues = num_rx_qs;
804 
805 	err = gfar_alloc_tx_queues(priv);
806 	if (err)
807 		goto tx_alloc_failed;
808 
809 	err = gfar_alloc_rx_queues(priv);
810 	if (err)
811 		goto rx_alloc_failed;
812 
813 	/* Init Rx queue filer rule set linked list */
814 	INIT_LIST_HEAD(&priv->rx_list.list);
815 	priv->rx_list.count = 0;
816 	mutex_init(&priv->rx_queue_access);
817 
818 	model = of_get_property(np, "model", NULL);
819 
820 	for (i = 0; i < MAXGROUPS; i++)
821 		priv->gfargrp[i].regs = NULL;
822 
823 	/* Parse and initialize group specific information */
824 	if (priv->mode == MQ_MG_MODE) {
825 		for_each_child_of_node(np, child) {
826 			err = gfar_parse_group(child, priv, model);
827 			if (err)
828 				goto err_grp_init;
829 		}
830 	} else { /* SQ_SG_MODE */
831 		err = gfar_parse_group(np, priv, model);
832 		if (err)
833 			goto err_grp_init;
834 	}
835 
836 	stash = of_get_property(np, "bd-stash", NULL);
837 
838 	if (stash) {
839 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
840 		priv->bd_stash_en = 1;
841 	}
842 
843 	stash_len = of_get_property(np, "rx-stash-len", NULL);
844 
845 	if (stash_len)
846 		priv->rx_stash_size = *stash_len;
847 
848 	stash_idx = of_get_property(np, "rx-stash-idx", NULL);
849 
850 	if (stash_idx)
851 		priv->rx_stash_index = *stash_idx;
852 
853 	if (stash_len || stash_idx)
854 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
855 
856 	mac_addr = of_get_mac_address(np);
857 
858 	if (mac_addr)
859 		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
860 
861 	if (model && !strcasecmp(model, "TSEC"))
862 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
863 				     FSL_GIANFAR_DEV_HAS_COALESCE |
864 				     FSL_GIANFAR_DEV_HAS_RMON |
865 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
866 
867 	if (model && !strcasecmp(model, "eTSEC"))
868 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
869 				     FSL_GIANFAR_DEV_HAS_COALESCE |
870 				     FSL_GIANFAR_DEV_HAS_RMON |
871 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
872 				     FSL_GIANFAR_DEV_HAS_CSUM |
873 				     FSL_GIANFAR_DEV_HAS_VLAN |
874 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
875 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
876 				     FSL_GIANFAR_DEV_HAS_TIMER;
877 
878 	ctype = of_get_property(np, "phy-connection-type", NULL);
879 
880 	/* We only care about rgmii-id.  The rest are autodetected */
881 	if (ctype && !strcmp(ctype, "rgmii-id"))
882 		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
883 	else
884 		priv->interface = PHY_INTERFACE_MODE_MII;
885 
886 	if (of_get_property(np, "fsl,magic-packet", NULL))
887 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
888 
889 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
890 
891 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
892 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
893 
894 	return 0;
895 
896 err_grp_init:
897 	unmap_group_regs(priv);
898 rx_alloc_failed:
899 	gfar_free_rx_queues(priv);
900 tx_alloc_failed:
901 	gfar_free_tx_queues(priv);
902 	free_gfar_dev(priv);
903 	return err;
904 }
905 
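
/* SIOCSHWTSTAMP: enable or disable hardware Tx/Rx timestamping */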
906 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
907 {
908 	struct hwtstamp_config config;
909 	struct gfar_private *priv = netdev_priv(netdev);
910 
911 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
912 		return -EFAULT;
913 
914 	/* reserved for future extensions */
915 	if (config.flags)
916 		return -EINVAL;
917 
918 	switch (config.tx_type) {
919 	case HWTSTAMP_TX_OFF:
920 		priv->hwts_tx_en = 0;
921 		break;
922 	case HWTSTAMP_TX_ON:
923 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
924 			return -ERANGE;
925 		priv->hwts_tx_en = 1;
926 		break;
927 	default:
928 		return -ERANGE;
929 	}
930 
931 	switch (config.rx_filter) {
932 	case HWTSTAMP_FILTER_NONE:
933 		if (priv->hwts_rx_en) {
934 			priv->hwts_rx_en = 0;
935 			reset_gfar(netdev);
936 		}
937 		break;
938 	default:
939 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
940 			return -ERANGE;
941 		if (!priv->hwts_rx_en) {
942 			priv->hwts_rx_en = 1;
943 			reset_gfar(netdev);
944 		}
945 		config.rx_filter = HWTSTAMP_FILTER_ALL;
946 		break;
947 	}
948 
949 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
950 		-EFAULT : 0;
951 }
952 
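
/* SIOCGHWTSTAMP: report the current hardware timestamping configuration */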
953 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
954 {
955 	struct hwtstamp_config config;
956 	struct gfar_private *priv = netdev_priv(netdev);
957 
958 	config.flags = 0;
959 	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
960 	config.rx_filter = (priv->hwts_rx_en ?
961 			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
962 
963 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
964 		-EFAULT : 0;
965 }
966 
967 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
968 {
969 	struct gfar_private *priv = netdev_priv(dev);
970 
971 	if (!netif_running(dev))
972 		return -EINVAL;
973 
974 	if (cmd == SIOCSHWTSTAMP)
975 		return gfar_hwtstamp_set(dev, rq);
976 	if (cmd == SIOCGHWTSTAMP)
977 		return gfar_hwtstamp_get(dev, rq);
978 
979 	if (!priv->phydev)
980 		return -ENODEV;
981 
982 	return phy_mii_ioctl(priv->phydev, rq, cmd);
983 }
984 
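
/* Add a cluster of filer rules matching one parser class (e.g. IPv4|UDP)
 * and return the next free (lower) filer index.
 */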
985 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
986 				   u32 class)
987 {
988 	u32 rqfpr = FPR_FILER_MASK;
989 	u32 rqfcr = 0x0;
990 
991 	rqfar--;
992 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
993 	priv->ftp_rqfpr[rqfar] = rqfpr;
994 	priv->ftp_rqfcr[rqfar] = rqfcr;
995 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
996 
997 	rqfar--;
998 	rqfcr = RQFCR_CMP_NOMATCH;
999 	priv->ftp_rqfpr[rqfar] = rqfpr;
1000 	priv->ftp_rqfcr[rqfar] = rqfcr;
1001 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1002 
1003 	rqfar--;
1004 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1005 	rqfpr = class;
1006 	priv->ftp_rqfcr[rqfar] = rqfcr;
1007 	priv->ftp_rqfpr[rqfar] = rqfpr;
1008 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1009 
1010 	rqfar--;
1011 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1012 	rqfpr = class;
1013 	priv->ftp_rqfcr[rqfar] = rqfcr;
1014 	priv->ftp_rqfpr[rqfar] = rqfpr;
1015 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1016 
1017 	return rqfar;
1018 }
1019 
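
/* Install the default Rx filer table: a default match rule, one rule
 * cluster per IPv4/IPv6 TCP/UDP class, and no-match rules for the
 * remaining entries.
 */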
1020 static void gfar_init_filer_table(struct gfar_private *priv)
1021 {
1022 	int i = 0x0;
1023 	u32 rqfar = MAX_FILER_IDX;
1024 	u32 rqfcr = 0x0;
1025 	u32 rqfpr = FPR_FILER_MASK;
1026 
1027 	/* Default rule */
1028 	rqfcr = RQFCR_CMP_MATCH;
1029 	priv->ftp_rqfcr[rqfar] = rqfcr;
1030 	priv->ftp_rqfpr[rqfar] = rqfpr;
1031 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1032 
1033 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1034 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1035 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1036 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1037 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1038 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1039 
	/* cur_filer_idx indicates the first non-masked rule */
1041 	priv->cur_filer_idx = rqfar;
1042 
1043 	/* Rest are masked rules */
1044 	rqfcr = RQFCR_CMP_NOMATCH;
1045 	for (i = 0; i < rqfar; i++) {
1046 		priv->ftp_rqfcr[i] = rqfcr;
1047 		priv->ftp_rqfpr[i] = rqfpr;
1048 		gfar_write_filer(priv, i, rqfcr, rqfpr);
1049 	}
1050 }
1051 
1052 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1053 {
1054 	unsigned int pvr = mfspr(SPRN_PVR);
1055 	unsigned int svr = mfspr(SPRN_SVR);
1056 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1057 	unsigned int rev = svr & 0xffff;
1058 
1059 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
1060 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1061 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1062 		priv->errata |= GFAR_ERRATA_74;
1063 
1064 	/* MPC8313 and MPC837x all rev */
1065 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
1066 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1067 		priv->errata |= GFAR_ERRATA_76;
1068 
1069 	/* MPC8313 Rev < 2.0 */
1070 	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1071 		priv->errata |= GFAR_ERRATA_12;
1072 }
1073 
1074 static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1075 {
1076 	unsigned int svr = mfspr(SPRN_SVR);
1077 
1078 	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1079 		priv->errata |= GFAR_ERRATA_12;
1080 	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1081 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
1082 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1083 }
1084 
1085 static void gfar_detect_errata(struct gfar_private *priv)
1086 {
1087 	struct device *dev = &priv->ofdev->dev;
1088 
1089 	/* no plans to fix */
1090 	priv->errata |= GFAR_ERRATA_A002;
1091 
1092 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1093 		__gfar_detect_errata_85xx(priv);
1094 	else /* non-mpc85xx parts, i.e. e300 core based */
1095 		__gfar_detect_errata_83xx(priv);
1096 
1097 	if (priv->errata)
1098 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1099 			 priv->errata);
1100 }
1101 
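
/* Soft-reset the MAC and reprogram it from the current configuration:
 * frame lengths, hash registers, Rx/Tx control, MAC address, multicast
 * filters and interrupt coalescing.
 */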
1102 void gfar_mac_reset(struct gfar_private *priv)
1103 {
1104 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1105 	u32 tempval;
1106 
1107 	/* Reset MAC layer */
1108 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1109 
1110 	/* We need to delay at least 3 TX clocks */
1111 	udelay(3);
1112 
1113 	/* the soft reset bit is not self-resetting, so we need to
1114 	 * clear it before resuming normal operation
1115 	 */
1116 	gfar_write(&regs->maccfg1, 0);
1117 
1118 	udelay(3);
1119 
1120 	/* Compute rx_buff_size based on config flags */
1121 	gfar_rx_buff_size_config(priv);
1122 
1123 	/* Initialize the max receive frame/buffer lengths */
1124 	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1125 	gfar_write(&regs->mrblr, priv->rx_buffer_size);
1126 
1127 	/* Initialize the Minimum Frame Length Register */
1128 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1129 
1130 	/* Initialize MACCFG2. */
1131 	tempval = MACCFG2_INIT_SETTINGS;
1132 
1133 	/* If the mtu is larger than the max size for standard
	 * ethernet frames (i.e., a jumbo frame), then set maccfg2
1135 	 * to allow huge frames, and to check the length
1136 	 */
1137 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1138 	    gfar_has_errata(priv, GFAR_ERRATA_74))
1139 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1140 
1141 	gfar_write(&regs->maccfg2, tempval);
1142 
1143 	/* Clear mac addr hash registers */
1144 	gfar_write(&regs->igaddr0, 0);
1145 	gfar_write(&regs->igaddr1, 0);
1146 	gfar_write(&regs->igaddr2, 0);
1147 	gfar_write(&regs->igaddr3, 0);
1148 	gfar_write(&regs->igaddr4, 0);
1149 	gfar_write(&regs->igaddr5, 0);
1150 	gfar_write(&regs->igaddr6, 0);
1151 	gfar_write(&regs->igaddr7, 0);
1152 
1153 	gfar_write(&regs->gaddr0, 0);
1154 	gfar_write(&regs->gaddr1, 0);
1155 	gfar_write(&regs->gaddr2, 0);
1156 	gfar_write(&regs->gaddr3, 0);
1157 	gfar_write(&regs->gaddr4, 0);
1158 	gfar_write(&regs->gaddr5, 0);
1159 	gfar_write(&regs->gaddr6, 0);
1160 	gfar_write(&regs->gaddr7, 0);
1161 
1162 	if (priv->extended_hash)
1163 		gfar_clear_exact_match(priv->ndev);
1164 
1165 	gfar_mac_rx_config(priv);
1166 
1167 	gfar_mac_tx_config(priv);
1168 
1169 	gfar_set_mac_address(priv->ndev);
1170 
1171 	gfar_set_multi(priv->ndev);
1172 
1173 	/* clear ievent and imask before configuring coalescing */
1174 	gfar_ints_disable(priv);
1175 
1176 	/* Configure the coalescing support */
1177 	gfar_configure_coalescing_all(priv);
1178 }
1179 
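
/* One-time hardware setup at probe time: halt the DMA engine, reset the
 * MAC, clear the RMON MIB counters, and program ECNTRL, stashing
 * attributes, FIFO thresholds and interrupt steering.
 */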
1180 static void gfar_hw_init(struct gfar_private *priv)
1181 {
1182 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1183 	u32 attrs;
1184 
1185 	/* Stop the DMA engine now, in case it was running before
1186 	 * (The firmware could have used it, and left it running).
1187 	 */
1188 	gfar_halt(priv);
1189 
1190 	gfar_mac_reset(priv);
1191 
1192 	/* Zero out the rmon mib registers if it has them */
1193 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1194 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1195 
1196 		/* Mask off the CAM interrupts */
1197 		gfar_write(&regs->rmon.cam1, 0xffffffff);
1198 		gfar_write(&regs->rmon.cam2, 0xffffffff);
1199 	}
1200 
1201 	/* Initialize ECNTRL */
1202 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1203 
1204 	/* Set the extraction length and index */
1205 	attrs = ATTRELI_EL(priv->rx_stash_size) |
1206 		ATTRELI_EI(priv->rx_stash_index);
1207 
1208 	gfar_write(&regs->attreli, attrs);
1209 
1210 	/* Start with defaults, and add stashing
1211 	 * depending on driver parameters
1212 	 */
1213 	attrs = ATTR_INIT_SETTINGS;
1214 
1215 	if (priv->bd_stash_en)
1216 		attrs |= ATTR_BDSTASH;
1217 
1218 	if (priv->rx_stash_size != 0)
1219 		attrs |= ATTR_BUFSTASH;
1220 
1221 	gfar_write(&regs->attr, attrs);
1222 
1223 	/* FIFO configs */
1224 	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1225 	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1226 	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1227 
1228 	/* Program the interrupt steering regs, only for MG devices */
1229 	if (priv->num_grps > 1)
1230 		gfar_write_isrg(priv);
1231 }
1232 
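
/* Select the set of hash registers used for multicast filtering,
 * depending on whether the controller supports extended hashing.
 */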
static void gfar_init_addr_hash_table(struct gfar_private *priv)
1234 {
1235 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1236 
1237 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1238 		priv->extended_hash = 1;
1239 		priv->hash_width = 9;
1240 
1241 		priv->hash_regs[0] = &regs->igaddr0;
1242 		priv->hash_regs[1] = &regs->igaddr1;
1243 		priv->hash_regs[2] = &regs->igaddr2;
1244 		priv->hash_regs[3] = &regs->igaddr3;
1245 		priv->hash_regs[4] = &regs->igaddr4;
1246 		priv->hash_regs[5] = &regs->igaddr5;
1247 		priv->hash_regs[6] = &regs->igaddr6;
1248 		priv->hash_regs[7] = &regs->igaddr7;
1249 		priv->hash_regs[8] = &regs->gaddr0;
1250 		priv->hash_regs[9] = &regs->gaddr1;
1251 		priv->hash_regs[10] = &regs->gaddr2;
1252 		priv->hash_regs[11] = &regs->gaddr3;
1253 		priv->hash_regs[12] = &regs->gaddr4;
1254 		priv->hash_regs[13] = &regs->gaddr5;
1255 		priv->hash_regs[14] = &regs->gaddr6;
1256 		priv->hash_regs[15] = &regs->gaddr7;
1257 
1258 	} else {
1259 		priv->extended_hash = 0;
1260 		priv->hash_width = 8;
1261 
1262 		priv->hash_regs[0] = &regs->gaddr0;
1263 		priv->hash_regs[1] = &regs->gaddr1;
1264 		priv->hash_regs[2] = &regs->gaddr2;
1265 		priv->hash_regs[3] = &regs->gaddr3;
1266 		priv->hash_regs[4] = &regs->gaddr4;
1267 		priv->hash_regs[5] = &regs->gaddr5;
1268 		priv->hash_regs[6] = &regs->gaddr6;
1269 		priv->hash_regs[7] = &regs->gaddr7;
1270 	}
1271 }
1272 
1273 /* Set up the ethernet device structure, private data,
1274  * and anything else we need before we start
1275  */
1276 static int gfar_probe(struct platform_device *ofdev)
1277 {
1278 	struct net_device *dev = NULL;
1279 	struct gfar_private *priv = NULL;
1280 	int err = 0, i;
1281 
1282 	err = gfar_of_init(ofdev, &dev);
1283 
1284 	if (err)
1285 		return err;
1286 
1287 	priv = netdev_priv(dev);
1288 	priv->ndev = dev;
1289 	priv->ofdev = ofdev;
1290 	priv->dev = &ofdev->dev;
1291 	SET_NETDEV_DEV(dev, &ofdev->dev);
1292 
1293 	spin_lock_init(&priv->bflock);
1294 	INIT_WORK(&priv->reset_task, gfar_reset_task);
1295 
1296 	platform_set_drvdata(ofdev, priv);
1297 
1298 	gfar_detect_errata(priv);
1299 
1300 	/* Set the dev->base_addr to the gfar reg region */
1301 	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1302 
1303 	/* Fill in the dev structure */
1304 	dev->watchdog_timeo = TX_TIMEOUT;
1305 	dev->mtu = 1500;
1306 	dev->netdev_ops = &gfar_netdev_ops;
1307 	dev->ethtool_ops = &gfar_ethtool_ops;
1308 
	/* Register NAPI for each interrupt group */
1310 	for (i = 0; i < priv->num_grps; i++) {
1311 		if (priv->poll_mode == GFAR_SQ_POLLING) {
1312 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1313 				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1314 			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1315 				       gfar_poll_tx_sq, 2);
1316 		} else {
1317 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1318 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
1319 			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1320 				       gfar_poll_tx, 2);
1321 		}
1322 	}
1323 
1324 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1325 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1326 				   NETIF_F_RXCSUM;
1327 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1328 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1329 	}
1330 
1331 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1332 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1333 				    NETIF_F_HW_VLAN_CTAG_RX;
1334 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1335 	}
1336 
1337 	gfar_init_addr_hash_table(priv);
1338 
1339 	/* Insert receive time stamps into padding alignment bytes */
1340 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1341 		priv->padding = 8;
1342 
1343 	if (dev->features & NETIF_F_IP_CSUM ||
1344 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1345 		dev->needed_headroom = GMAC_FCB_LEN;
1346 
1347 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1348 
1349 	/* Initializing some of the rx/tx queue level parameters */
1350 	for (i = 0; i < priv->num_tx_queues; i++) {
1351 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1352 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1353 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1354 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
1355 	}
1356 
1357 	for (i = 0; i < priv->num_rx_queues; i++) {
1358 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1359 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1360 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1361 	}
1362 
1363 	/* always enable rx filer */
1364 	priv->rx_filer_enable = 1;
1365 	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
1368 	if (priv->num_tx_queues == 1)
1369 		priv->prio_sched_en = 1;
1370 
1371 	set_bit(GFAR_DOWN, &priv->state);
1372 
1373 	gfar_hw_init(priv);
1374 
1375 	err = register_netdev(dev);
1376 
1377 	if (err) {
1378 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
1379 		goto register_fail;
1380 	}
1381 
1382 	/* Carrier starts down, phylib will bring it up */
1383 	netif_carrier_off(dev);
1384 
1385 	device_init_wakeup(&dev->dev,
1386 			   priv->device_flags &
1387 			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1388 
1389 	/* fill out IRQ number and name fields */
1390 	for (i = 0; i < priv->num_grps; i++) {
1391 		struct gfar_priv_grp *grp = &priv->gfargrp[i];
1392 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1393 			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1394 				dev->name, "_g", '0' + i, "_tx");
1395 			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1396 				dev->name, "_g", '0' + i, "_rx");
1397 			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1398 				dev->name, "_g", '0' + i, "_er");
		} else {
			strcpy(gfar_irq(grp, TX)->name, dev->name);
		}
1401 	}
1402 
1403 	/* Initialize the filer table */
1404 	gfar_init_filer_table(priv);
1405 
1406 	/* Print out the device info */
1407 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1408 
1409 	/* Even more device info helps when determining which kernel
1410 	 * provided which set of benchmarks.
1411 	 */
1412 	netdev_info(dev, "Running with NAPI enabled\n");
1413 	for (i = 0; i < priv->num_rx_queues; i++)
1414 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1415 			    i, priv->rx_queue[i]->rx_ring_size);
1416 	for (i = 0; i < priv->num_tx_queues; i++)
1417 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1418 			    i, priv->tx_queue[i]->tx_ring_size);
1419 
1420 	return 0;
1421 
1422 register_fail:
1423 	unmap_group_regs(priv);
1424 	gfar_free_rx_queues(priv);
1425 	gfar_free_tx_queues(priv);
1426 	if (priv->phy_node)
1427 		of_node_put(priv->phy_node);
1428 	if (priv->tbi_node)
1429 		of_node_put(priv->tbi_node);
1430 	free_gfar_dev(priv);
1431 	return err;
1432 }
1433 
1434 static int gfar_remove(struct platform_device *ofdev)
1435 {
1436 	struct gfar_private *priv = platform_get_drvdata(ofdev);
1437 
1438 	if (priv->phy_node)
1439 		of_node_put(priv->phy_node);
1440 	if (priv->tbi_node)
1441 		of_node_put(priv->tbi_node);
1442 
1443 	unregister_netdev(priv->ndev);
1444 	unmap_group_regs(priv);
1445 	gfar_free_rx_queues(priv);
1446 	gfar_free_tx_queues(priv);
1447 	free_gfar_dev(priv);
1448 
1449 	return 0;
1450 }
1451 
1452 #ifdef CONFIG_PM
1453 
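
/* PM suspend: detach the device and halt the controller, leaving the
 * receiver armed for magic-packet wake-up if wake-on-LAN is enabled.
 */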
1454 static int gfar_suspend(struct device *dev)
1455 {
1456 	struct gfar_private *priv = dev_get_drvdata(dev);
1457 	struct net_device *ndev = priv->ndev;
1458 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1459 	unsigned long flags;
1460 	u32 tempval;
1461 
1462 	int magic_packet = priv->wol_en &&
1463 			   (priv->device_flags &
1464 			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1465 
1466 	netif_device_detach(ndev);
1467 
1468 	if (netif_running(ndev)) {
1469 
1470 		local_irq_save(flags);
1471 		lock_tx_qs(priv);
1472 
1473 		gfar_halt_nodisable(priv);
1474 
1475 		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
1476 		tempval = gfar_read(&regs->maccfg1);
1477 
1478 		tempval &= ~MACCFG1_TX_EN;
1479 
1480 		if (!magic_packet)
1481 			tempval &= ~MACCFG1_RX_EN;
1482 
1483 		gfar_write(&regs->maccfg1, tempval);
1484 
1485 		unlock_tx_qs(priv);
1486 		local_irq_restore(flags);
1487 
1488 		disable_napi(priv);
1489 
1490 		if (magic_packet) {
1491 			/* Enable interrupt on Magic Packet */
1492 			gfar_write(&regs->imask, IMASK_MAG);
1493 
1494 			/* Enable Magic Packet mode */
1495 			tempval = gfar_read(&regs->maccfg2);
1496 			tempval |= MACCFG2_MPEN;
1497 			gfar_write(&regs->maccfg2, tempval);
1498 		} else {
1499 			phy_stop(priv->phydev);
1500 		}
1501 	}
1502 
1503 	return 0;
1504 }
1505 
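
/* PM resume: leave magic-packet mode, restart the controller and PHY,
 * and re-attach the device.
 */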
1506 static int gfar_resume(struct device *dev)
1507 {
1508 	struct gfar_private *priv = dev_get_drvdata(dev);
1509 	struct net_device *ndev = priv->ndev;
1510 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1511 	unsigned long flags;
1512 	u32 tempval;
1513 	int magic_packet = priv->wol_en &&
1514 			   (priv->device_flags &
1515 			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1516 
1517 	if (!netif_running(ndev)) {
1518 		netif_device_attach(ndev);
1519 		return 0;
1520 	}
1521 
1522 	if (!magic_packet && priv->phydev)
1523 		phy_start(priv->phydev);
1524 
1525 	/* Disable Magic Packet mode, in case something
1526 	 * else woke us up.
1527 	 */
1528 	local_irq_save(flags);
1529 	lock_tx_qs(priv);
1530 
1531 	tempval = gfar_read(&regs->maccfg2);
1532 	tempval &= ~MACCFG2_MPEN;
1533 	gfar_write(&regs->maccfg2, tempval);
1534 
1535 	gfar_start(priv);
1536 
1537 	unlock_tx_qs(priv);
1538 	local_irq_restore(flags);
1539 
1540 	netif_device_attach(ndev);
1541 
1542 	enable_napi(priv);
1543 
1544 	return 0;
1545 }
1546 
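
/* PM restore (after hibernation): reinitialize the descriptor rings and
 * the MAC from scratch before restarting the controller.
 */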
1547 static int gfar_restore(struct device *dev)
1548 {
1549 	struct gfar_private *priv = dev_get_drvdata(dev);
1550 	struct net_device *ndev = priv->ndev;
1551 
1552 	if (!netif_running(ndev)) {
1553 		netif_device_attach(ndev);
1554 
1555 		return 0;
1556 	}
1557 
1558 	if (gfar_init_bds(ndev)) {
1559 		free_skb_resources(priv);
1560 		return -ENOMEM;
1561 	}
1562 
1563 	gfar_mac_reset(priv);
1564 
1565 	gfar_init_tx_rx_base(priv);
1566 
1567 	gfar_start(priv);
1568 
1569 	priv->oldlink = 0;
1570 	priv->oldspeed = 0;
1571 	priv->oldduplex = -1;
1572 
1573 	if (priv->phydev)
1574 		phy_start(priv->phydev);
1575 
1576 	netif_device_attach(ndev);
1577 	enable_napi(priv);
1578 
1579 	return 0;
1580 }
1581 
1582 static struct dev_pm_ops gfar_pm_ops = {
1583 	.suspend = gfar_suspend,
1584 	.resume = gfar_resume,
1585 	.freeze = gfar_suspend,
1586 	.thaw = gfar_resume,
1587 	.restore = gfar_restore,
1588 };
1589 
1590 #define GFAR_PM_OPS (&gfar_pm_ops)
1591 
1592 #else
1593 
1594 #define GFAR_PM_OPS NULL
1595 
1596 #endif
1597 
1598 /* Reads the controller's registers to determine what interface
1599  * connects it to the PHY.
1600  */
1601 static phy_interface_t gfar_get_interface(struct net_device *dev)
1602 {
1603 	struct gfar_private *priv = netdev_priv(dev);
1604 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1605 	u32 ecntrl;
1606 
1607 	ecntrl = gfar_read(&regs->ecntrl);
1608 
1609 	if (ecntrl & ECNTRL_SGMII_MODE)
1610 		return PHY_INTERFACE_MODE_SGMII;
1611 
1612 	if (ecntrl & ECNTRL_TBI_MODE) {
1613 		if (ecntrl & ECNTRL_REDUCED_MODE)
1614 			return PHY_INTERFACE_MODE_RTBI;
1615 		else
1616 			return PHY_INTERFACE_MODE_TBI;
1617 	}
1618 
1619 	if (ecntrl & ECNTRL_REDUCED_MODE) {
1620 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1621 			return PHY_INTERFACE_MODE_RMII;
		} else {
1624 			phy_interface_t interface = priv->interface;
1625 
1626 			/* This isn't autodetected right now, so it must
1627 			 * be set by the device tree or platform code.
1628 			 */
1629 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1630 				return PHY_INTERFACE_MODE_RGMII_ID;
1631 
1632 			return PHY_INTERFACE_MODE_RGMII;
1633 		}
1634 	}
1635 
1636 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1637 		return PHY_INTERFACE_MODE_GMII;
1638 
1639 	return PHY_INTERFACE_MODE_MII;
1640 }
1641 
1642 
1643 /* Initializes driver's PHY state, and attaches to the PHY.
1644  * Returns 0 on success.
1645  */
1646 static int init_phy(struct net_device *dev)
1647 {
1648 	struct gfar_private *priv = netdev_priv(dev);
1649 	uint gigabit_support =
1650 		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1651 		GFAR_SUPPORTED_GBIT : 0;
1652 	phy_interface_t interface;
1653 
1654 	priv->oldlink = 0;
1655 	priv->oldspeed = 0;
1656 	priv->oldduplex = -1;
1657 
1658 	interface = gfar_get_interface(dev);
1659 
1660 	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1661 				      interface);
1662 	if (!priv->phydev)
1663 		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1664 							 interface);
1665 	if (!priv->phydev) {
1666 		dev_err(&dev->dev, "could not attach to PHY\n");
1667 		return -ENODEV;
1668 	}
1669 
1670 	if (interface == PHY_INTERFACE_MODE_SGMII)
1671 		gfar_configure_serdes(dev);
1672 
1673 	/* Remove any features not supported by the controller */
1674 	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1675 	priv->phydev->advertising = priv->phydev->supported;
1676 
1677 	return 0;
1678 }
1679 
1680 /* Initialize TBI PHY interface for communicating with the
1681  * SERDES lynx PHY on the chip.  We communicate with this PHY
1682  * through the MDIO bus on each controller, treating it as a
1683  * "normal" PHY at the address found in the TBIPA register.  We assume
1684  * that the TBIPA register is valid.  Either the MDIO bus code will set
1685  * it to a value that doesn't conflict with other PHYs on the bus, or the
1686  * value doesn't matter, as there are no other PHYs on the bus.
1687  */
1688 static void gfar_configure_serdes(struct net_device *dev)
1689 {
1690 	struct gfar_private *priv = netdev_priv(dev);
1691 	struct phy_device *tbiphy;
1692 
1693 	if (!priv->tbi_node) {
1694 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
1695 				    "device tree specify a tbi-handle\n");
1696 		return;
1697 	}
1698 
1699 	tbiphy = of_phy_find_device(priv->tbi_node);
1700 	if (!tbiphy) {
1701 		dev_err(&dev->dev, "error: Could not get TBI device\n");
1702 		return;
1703 	}
1704 
1705 	/* If the link is already up, we must already be ok, and don't need to
1706 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1707 	 * everything for us?  Resetting it takes the link down and requires
1708 	 * several seconds for it to come back.
1709 	 */
1710 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1711 		return;
1712 
	/* Single clk mode, mii mode off (for serdes communication) */
1714 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1715 
1716 	phy_write(tbiphy, MII_ADVERTISE,
1717 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1718 		  ADVERTISE_1000XPSE_ASYM);
1719 
1720 	phy_write(tbiphy, MII_BMCR,
1721 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1722 		  BMCR_SPEED1000);
1723 }
1724 
1725 static int __gfar_is_rx_idle(struct gfar_private *priv)
1726 {
1727 	u32 res;
1728 
	/* Normally, TSEC should not hang on GRS commands, so we should
1730 	 * actually wait for IEVENT_GRSC flag.
1731 	 */
1732 	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1733 		return 0;
1734 
1735 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1736 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1737 	 * and the Rx can be safely reset.
1738 	 */
1739 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1740 	res &= 0x7f807f80;
1741 	if ((res & 0xffff) == (res >> 16))
1742 		return 1;
1743 
1744 	return 0;
1745 }
1746 
1747 /* Halt the receive and transmit queues */
1748 static void gfar_halt_nodisable(struct gfar_private *priv)
1749 {
1750 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1751 	u32 tempval;
1752 
1753 	gfar_ints_disable(priv);
1754 
1755 	/* Stop the DMA, and wait for it to stop */
1756 	tempval = gfar_read(&regs->dmactrl);
1757 	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1758 	    (DMACTRL_GRS | DMACTRL_GTS)) {
1759 		int ret;
1760 
1761 		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1762 		gfar_write(&regs->dmactrl, tempval);
1763 
1764 		do {
1765 			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1766 				 (IEVENT_GRSC | IEVENT_GTSC)) ==
1767 				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1768 			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1769 				ret = __gfar_is_rx_idle(priv);
1770 		} while (!ret);
1771 	}
1772 }
1773 
1774 /* Halt the receive and transmit queues */
1775 void gfar_halt(struct gfar_private *priv)
1776 {
1777 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1778 	u32 tempval;
1779 
	/* Disable the Rx/Tx hw queues */
1781 	gfar_write(&regs->rqueue, 0);
1782 	gfar_write(&regs->tqueue, 0);
1783 
1784 	mdelay(10);
1785 
1786 	gfar_halt_nodisable(priv);
1787 
1788 	/* Disable Rx/Tx DMA */
1789 	tempval = gfar_read(&regs->maccfg1);
1790 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1791 	gfar_write(&regs->maccfg1, tempval);
1792 }
1793 
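
/* Take the interface down: stop the Tx queues and NAPI, halt the
 * controller, stop the PHY and free the skb/descriptor resources.
 */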
1794 void stop_gfar(struct net_device *dev)
1795 {
1796 	struct gfar_private *priv = netdev_priv(dev);
1797 
1798 	netif_tx_stop_all_queues(dev);
1799 
1800 	smp_mb__before_clear_bit();
1801 	set_bit(GFAR_DOWN, &priv->state);
1802 	smp_mb__after_clear_bit();
1803 
1804 	disable_napi(priv);
1805 
1806 	/* disable ints and gracefully shut down Rx/Tx DMA */
1807 	gfar_halt(priv);
1808 
1809 	phy_stop(priv->phydev);
1810 
1811 	free_skb_resources(priv);
1812 }
1813 
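
/* Unmap and free any skbs still queued on a Tx ring, then free the
 * ring's skb pointer array.
 */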
1814 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1815 {
1816 	struct txbd8 *txbdp;
1817 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1818 	int i, j;
1819 
1820 	txbdp = tx_queue->tx_bd_base;
1821 
1822 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1823 		if (!tx_queue->tx_skbuff[i])
1824 			continue;
1825 
1826 		dma_unmap_single(priv->dev, txbdp->bufPtr,
1827 				 txbdp->length, DMA_TO_DEVICE);
1828 		txbdp->lstatus = 0;
1829 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1830 		     j++) {
1831 			txbdp++;
1832 			dma_unmap_page(priv->dev, txbdp->bufPtr,
1833 				       txbdp->length, DMA_TO_DEVICE);
1834 		}
1835 		txbdp++;
1836 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1837 		tx_queue->tx_skbuff[i] = NULL;
1838 	}
1839 	kfree(tx_queue->tx_skbuff);
1840 	tx_queue->tx_skbuff = NULL;
1841 }
1842 
1843 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1844 {
1845 	struct rxbd8 *rxbdp;
1846 	struct gfar_private *priv = netdev_priv(rx_queue->dev);
1847 	int i;
1848 
1849 	rxbdp = rx_queue->rx_bd_base;
1850 
1851 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
1852 		if (rx_queue->rx_skbuff[i]) {
1853 			dma_unmap_single(priv->dev, rxbdp->bufPtr,
1854 					 priv->rx_buffer_size,
1855 					 DMA_FROM_DEVICE);
1856 			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1857 			rx_queue->rx_skbuff[i] = NULL;
1858 		}
1859 		rxbdp->lstatus = 0;
1860 		rxbdp->bufPtr = 0;
1861 		rxbdp++;
1862 	}
1863 	kfree(rx_queue->rx_skbuff);
1864 	rx_queue->rx_skbuff = NULL;
1865 }
1866 
1867 /* If there are any tx skbs or rx skbs still around, free them.
1868  * Then free tx_skbuff and rx_skbuff
1869  */
1870 static void free_skb_resources(struct gfar_private *priv)
1871 {
1872 	struct gfar_priv_tx_q *tx_queue = NULL;
1873 	struct gfar_priv_rx_q *rx_queue = NULL;
1874 	int i;
1875 
1876 	/* Go through all the buffer descriptors and free their data buffers */
1877 	for (i = 0; i < priv->num_tx_queues; i++) {
1878 		struct netdev_queue *txq;
1879 
1880 		tx_queue = priv->tx_queue[i];
1881 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1882 		if (tx_queue->tx_skbuff)
1883 			free_skb_tx_queue(tx_queue);
1884 		netdev_tx_reset_queue(txq);
1885 	}
1886 
1887 	for (i = 0; i < priv->num_rx_queues; i++) {
1888 		rx_queue = priv->rx_queue[i];
1889 		if (rx_queue->rx_skbuff)
1890 			free_skb_rx_queue(rx_queue);
1891 	}
1892 
1893 	dma_free_coherent(priv->dev,
1894 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1895 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1896 			  priv->tx_queue[0]->tx_bd_base,
1897 			  priv->tx_queue[0]->tx_bd_dma_base);
1898 }
1899 
1900 void gfar_start(struct gfar_private *priv)
1901 {
1902 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1903 	u32 tempval;
1904 	int i = 0;
1905 
1906 	/* Enable Rx/Tx hw queues */
1907 	gfar_write(&regs->rqueue, priv->rqueue);
1908 	gfar_write(&regs->tqueue, priv->tqueue);
1909 
1910 	/* Initialize DMACTRL to have WWR and WOP */
1911 	tempval = gfar_read(&regs->dmactrl);
1912 	tempval |= DMACTRL_INIT_SETTINGS;
1913 	gfar_write(&regs->dmactrl, tempval);
1914 
1915 	/* Make sure we aren't stopped */
1916 	tempval = gfar_read(&regs->dmactrl);
1917 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1918 	gfar_write(&regs->dmactrl, tempval);
1919 
1920 	for (i = 0; i < priv->num_grps; i++) {
1921 		regs = priv->gfargrp[i].regs;
1922 		/* Clear THLT/RHLT, so that the DMA starts polling now */
1923 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1924 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1925 	}
1926 
1927 	/* Enable Rx/Tx DMA */
1928 	tempval = gfar_read(&regs->maccfg1);
1929 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1930 	gfar_write(&regs->maccfg1, tempval);
1931 
1932 	gfar_ints_enable(priv);
1933 
1934 	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1935 }
1936 
1937 static void free_grp_irqs(struct gfar_priv_grp *grp)
1938 {
1939 	free_irq(gfar_irq(grp, TX)->irq, grp);
1940 	free_irq(gfar_irq(grp, RX)->irq, grp);
1941 	free_irq(gfar_irq(grp, ER)->irq, grp);
1942 }
1943 
1944 static int register_grp_irqs(struct gfar_priv_grp *grp)
1945 {
1946 	struct gfar_private *priv = grp->priv;
1947 	struct net_device *dev = priv->ndev;
1948 	int err;
1949 
1950 	/* If the device has multiple interrupts, register for
1951 	 * them.  Otherwise, only register for the one
1952 	 */
1953 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1954 		/* Install our interrupt handlers for Error,
1955 		 * Transmit, and Receive
1956 		 */
1957 		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1958 				  gfar_irq(grp, ER)->name, grp);
1959 		if (err < 0) {
1960 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1961 				  gfar_irq(grp, ER)->irq);
1962 
1963 			goto err_irq_fail;
1964 		}
1965 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1966 				  gfar_irq(grp, TX)->name, grp);
1967 		if (err < 0) {
1968 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1969 				  gfar_irq(grp, TX)->irq);
1970 			goto tx_irq_fail;
1971 		}
1972 		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
1973 				  gfar_irq(grp, RX)->name, grp);
1974 		if (err < 0) {
1975 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1976 				  gfar_irq(grp, RX)->irq);
1977 			goto rx_irq_fail;
1978 		}
1979 	} else {
1980 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
1981 				  gfar_irq(grp, TX)->name, grp);
1982 		if (err < 0) {
1983 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1984 				  gfar_irq(grp, TX)->irq);
1985 			goto err_irq_fail;
1986 		}
1987 	}
1988 
1989 	return 0;
1990 
1991 rx_irq_fail:
1992 	free_irq(gfar_irq(grp, TX)->irq, grp);
1993 tx_irq_fail:
1994 	free_irq(gfar_irq(grp, ER)->irq, grp);
1995 err_irq_fail:
1996 	return err;
1997 
1998 }
1999 
2000 static void gfar_free_irq(struct gfar_private *priv)
2001 {
2002 	int i;
2003 
2004 	/* Free the IRQs */
2005 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2006 		for (i = 0; i < priv->num_grps; i++)
2007 			free_grp_irqs(&priv->gfargrp[i]);
2008 	} else {
2009 		for (i = 0; i < priv->num_grps; i++)
2010 			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2011 				 &priv->gfargrp[i]);
2012 	}
2013 }
2014 
2015 static int gfar_request_irq(struct gfar_private *priv)
2016 {
2017 	int err, i, j;
2018 
2019 	for (i = 0; i < priv->num_grps; i++) {
2020 		err = register_grp_irqs(&priv->gfargrp[i]);
2021 		if (err) {
2022 			for (j = 0; j < i; j++)
2023 				free_grp_irqs(&priv->gfargrp[j]);
2024 			return err;
2025 		}
2026 	}
2027 
2028 	return 0;
2029 }
2030 
2031 /* Bring the controller up and running */
2032 int startup_gfar(struct net_device *ndev)
2033 {
2034 	struct gfar_private *priv = netdev_priv(ndev);
2035 	int err;
2036 
2037 	gfar_mac_reset(priv);
2038 
2039 	err = gfar_alloc_skb_resources(ndev);
2040 	if (err)
2041 		return err;
2042 
2043 	gfar_init_tx_rx_base(priv);
2044 
2045 	smp_mb__before_clear_bit();
2046 	clear_bit(GFAR_DOWN, &priv->state);
2047 	smp_mb__after_clear_bit();
2048 
2049 	/* Start Rx/Tx DMA and enable the interrupts */
2050 	gfar_start(priv);
2051 
2052 	phy_start(priv->phydev);
2053 
2054 	enable_napi(priv);
2055 
2056 	netif_tx_wake_all_queues(ndev);
2057 
2058 	return 0;
2059 }
2060 
2061 /* Called when something needs to use the ethernet device
2062  * Returns 0 for success.
2063  */
2064 static int gfar_enet_open(struct net_device *dev)
2065 {
2066 	struct gfar_private *priv = netdev_priv(dev);
2067 	int err;
2068 
2069 	err = init_phy(dev);
2070 	if (err)
2071 		return err;
2072 
2073 	err = gfar_request_irq(priv);
2074 	if (err)
2075 		return err;
2076 
2077 	err = startup_gfar(dev);
2078 	if (err)
2079 		return err;
2080 
2081 	device_set_wakeup_enable(&dev->dev, priv->wol_en);
2082 
2083 	return err;
2084 }
2085 
2086 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2087 {
2088 	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2089 
2090 	memset(fcb, 0, GMAC_FCB_LEN);
2091 
2092 	return fcb;
2093 }
2094 
2095 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2096 				    int fcb_length)
2097 {
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it up for checksumming, using a pseudo-header
	 * we provide
2101 	 */
2102 	u8 flags = TXFCB_DEFAULT;
2103 
	/* Tell the controller what the protocol is,
	 * and provide the already-calculated pseudo-header checksum (phcs)
2106 	 */
2107 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2108 		flags |= TXFCB_UDP;
2109 		fcb->phcs = udp_hdr(skb)->check;
2110 	} else
2111 		fcb->phcs = tcp_hdr(skb)->check;
2112 
2113 	/* l3os is the distance between the start of the
2114 	 * frame (skb->data) and the start of the IP hdr.
2115 	 * l4os is the distance between the start of the
2116 	 * l3 hdr and the l4 hdr
2117 	 */
2118 	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2119 	fcb->l4os = skb_network_header_len(skb);
2120 
2121 	fcb->flags = flags;
2122 }
2123 
inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2125 {
2126 	fcb->flags |= TXFCB_VLN;
2127 	fcb->vlctl = vlan_tx_tag_get(skb);
2128 }
2129 
2130 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2131 				      struct txbd8 *base, int ring_size)
2132 {
2133 	struct txbd8 *new_bd = bdp + stride;
2134 
2135 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2136 }
2137 
2138 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2139 				      int ring_size)
2140 {
2141 	return skip_txbd(bdp, 1, base, ring_size);
2142 }
2143 
2144 /* eTSEC12: csum generation not supported for some fcb offsets */
2145 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2146 				       unsigned long fcb_addr)
2147 {
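	/* an offset greater than 0x18 within a 32-byte block means the FCB
	 * (assumed GMAC_FCB_LEN == 8 bytes) would straddle a 32-byte boundary
	 */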
2148 	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2149 	       (fcb_addr % 0x20) > 0x18);
2150 }
2151 
/* eTSEC76: csum generation for frames larger than 2500 bytes may
 * cause excess delays before start of transmission
2154  */
2155 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2156 				       unsigned int len)
2157 {
2158 	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2159 	       (len > 2500));
2160 }
2161 
2162 /* This is called by the kernel when a frame is ready for transmission.
 * It is hooked up via the ndo_start_xmit method in net_device_ops
2164  */
2165 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2166 {
2167 	struct gfar_private *priv = netdev_priv(dev);
2168 	struct gfar_priv_tx_q *tx_queue = NULL;
2169 	struct netdev_queue *txq;
2170 	struct gfar __iomem *regs = NULL;
2171 	struct txfcb *fcb = NULL;
2172 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2173 	u32 lstatus;
2174 	int i, rq = 0;
2175 	int do_tstamp, do_csum, do_vlan;
2176 	u32 bufaddr;
2177 	unsigned long flags;
2178 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2179 
2180 	rq = skb->queue_mapping;
2181 	tx_queue = priv->tx_queue[rq];
2182 	txq = netdev_get_tx_queue(dev, rq);
2183 	base = tx_queue->tx_bd_base;
2184 	regs = tx_queue->grp->regs;
2185 
2186 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2187 	do_vlan = vlan_tx_tag_present(skb);
2188 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2189 		    priv->hwts_tx_en;
2190 
2191 	if (do_csum || do_vlan)
2192 		fcb_len = GMAC_FCB_LEN;
2193 
2194 	/* check if time stamp should be generated */
2195 	if (unlikely(do_tstamp))
2196 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2197 
2198 	/* make space for additional header when fcb is needed */
2199 	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2200 		struct sk_buff *skb_new;
2201 
2202 		skb_new = skb_realloc_headroom(skb, fcb_len);
2203 		if (!skb_new) {
2204 			dev->stats.tx_errors++;
2205 			dev_kfree_skb_any(skb);
2206 			return NETDEV_TX_OK;
2207 		}
2208 
2209 		if (skb->sk)
2210 			skb_set_owner_w(skb_new, skb->sk);
2211 		dev_consume_skb_any(skb);
2212 		skb = skb_new;
2213 	}
2214 
2215 	/* total number of fragments in the SKB */
2216 	nr_frags = skb_shinfo(skb)->nr_frags;
2217 
2218 	/* calculate the required number of TxBDs for this skb */
2219 	if (unlikely(do_tstamp))
2220 		nr_txbds = nr_frags + 2;
2221 	else
2222 		nr_txbds = nr_frags + 1;
2223 
2224 	/* check if there is space to queue this packet */
2225 	if (nr_txbds > tx_queue->num_txbdfree) {
2226 		/* no space, stop the queue */
2227 		netif_tx_stop_queue(txq);
2228 		dev->stats.tx_fifo_errors++;
2229 		return NETDEV_TX_BUSY;
2230 	}
2231 
2232 	/* Update transmit stats */
2233 	bytes_sent = skb->len;
2234 	tx_queue->stats.tx_bytes += bytes_sent;
2235 	/* keep Tx bytes on wire for BQL accounting */
2236 	GFAR_CB(skb)->bytes_sent = bytes_sent;
2237 	tx_queue->stats.tx_packets++;
2238 
2239 	txbdp = txbdp_start = tx_queue->cur_tx;
2240 	lstatus = txbdp->lstatus;
2241 
2242 	/* Time stamp insertion requires one additional TxBD */
2243 	if (unlikely(do_tstamp))
2244 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2245 						 tx_queue->tx_ring_size);
2246 
2247 	if (nr_frags == 0) {
2248 		if (unlikely(do_tstamp))
2249 			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2250 							  TXBD_INTERRUPT);
2251 		else
2252 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2253 	} else {
2254 		/* Place the fragment addresses and lengths into the TxBDs */
2255 		for (i = 0; i < nr_frags; i++) {
2256 			unsigned int frag_len;
2257 			/* Point at the next BD, wrapping as needed */
2258 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2259 
2260 			frag_len = skb_shinfo(skb)->frags[i].size;
2261 
2262 			lstatus = txbdp->lstatus | frag_len |
2263 				  BD_LFLAG(TXBD_READY);
2264 
2265 			/* Handle the last BD specially */
2266 			if (i == nr_frags - 1)
2267 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2268 
2269 			bufaddr = skb_frag_dma_map(priv->dev,
2270 						   &skb_shinfo(skb)->frags[i],
2271 						   0,
2272 						   frag_len,
2273 						   DMA_TO_DEVICE);
2274 
2275 			/* set the TxBD length and buffer pointer */
2276 			txbdp->bufPtr = bufaddr;
2277 			txbdp->lstatus = lstatus;
2278 		}
2279 
2280 		lstatus = txbdp_start->lstatus;
2281 	}
2282 
2283 	/* Add TxPAL between FCB and frame if required */
2284 	if (unlikely(do_tstamp)) {
2285 		skb_push(skb, GMAC_TXPAL_LEN);
2286 		memset(skb->data, 0, GMAC_TXPAL_LEN);
2287 	}
2288 
2289 	/* Add TxFCB if required */
2290 	if (fcb_len) {
2291 		fcb = gfar_add_fcb(skb);
2292 		lstatus |= BD_LFLAG(TXBD_TOE);
2293 	}
2294 
2295 	/* Set up checksumming */
2296 	if (do_csum) {
2297 		gfar_tx_checksum(skb, fcb, fcb_len);
2298 
2299 		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2300 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
2301 			__skb_pull(skb, GMAC_FCB_LEN);
2302 			skb_checksum_help(skb);
2303 			if (do_vlan || do_tstamp) {
2304 				/* put back a new fcb for vlan/tstamp TOE */
2305 				fcb = gfar_add_fcb(skb);
2306 			} else {
2307 				/* Tx TOE not used */
2308 				lstatus &= ~(BD_LFLAG(TXBD_TOE));
2309 				fcb = NULL;
2310 			}
2311 		}
2312 	}
2313 
2314 	if (do_vlan)
2315 		gfar_tx_vlan(skb, fcb);
2316 
2317 	/* Setup tx hardware time stamping if requested */
2318 	if (unlikely(do_tstamp)) {
2319 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2320 		fcb->ptp = 1;
2321 	}
2322 
2323 	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
2324 					     skb_headlen(skb), DMA_TO_DEVICE);
2325 
	/* If time stamping is requested, one additional TxBD must be set up. The
2327 	 * first TxBD points to the FCB and must have a data length of
2328 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2329 	 * the full frame length.
2330 	 */
2331 	if (unlikely(do_tstamp)) {
2332 		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
2333 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2334 					 (skb_headlen(skb) - fcb_len);
2335 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2336 	} else {
2337 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2338 	}
2339 
2340 	netdev_tx_sent_queue(txq, bytes_sent);
2341 
2342 	/* We can work in parallel with gfar_clean_tx_ring(), except
2343 	 * when modifying num_txbdfree. Note that we didn't grab the lock
2344 	 * when we were reading the num_txbdfree and checking for available
2345 	 * space, that's because outside of this function it can only grow,
2346 	 * and once we've got needed space, it cannot suddenly disappear.
2347 	 *
2348 	 * The lock also protects us from gfar_error(), which can modify
2349 	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting the ready bit for the first
	 * BD to be transmitted.
2352 	 */
2353 	spin_lock_irqsave(&tx_queue->txlock, flags);
2354 
2355 	/* The powerpc-specific eieio() is used, as wmb() has too strong
2356 	 * semantics (it requires synchronization between cacheable and
2357 	 * uncacheable mappings, which eieio doesn't provide and which we
2358 	 * don't need), thus requiring a more expensive sync instruction.  At
2359 	 * some point, the set of architecture-independent barrier functions
2360 	 * should be expanded to include weaker barriers.
2361 	 */
2362 	eieio();
2363 
2364 	txbdp_start->lstatus = lstatus;
2365 
2366 	eieio(); /* force lstatus write before tx_skbuff */
2367 
2368 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2369 
2370 	/* Update the current skb pointer to the next entry we will use
2371 	 * (wrapping if necessary)
2372 	 */
2373 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2374 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2375 
2376 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2377 
2378 	/* reduce TxBD free count */
2379 	tx_queue->num_txbdfree -= (nr_txbds);
2380 
2381 	/* If the next BD still needs to be cleaned up, then the bds
2382 	 * are full.  We need to tell the kernel to stop sending us stuff.
2383 	 */
2384 	if (!tx_queue->num_txbdfree) {
2385 		netif_tx_stop_queue(txq);
2386 
2387 		dev->stats.tx_fifo_errors++;
2388 	}
2389 
	/* Tell the DMA to go: clear this queue's THALT bit so it resumes polling */
2391 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2392 
2393 	/* Unlock priv */
2394 	spin_unlock_irqrestore(&tx_queue->txlock, flags);
2395 
2396 	return NETDEV_TX_OK;
2397 }
2398 
2399 /* Stops the kernel queue, and halts the controller */
2400 static int gfar_close(struct net_device *dev)
2401 {
2402 	struct gfar_private *priv = netdev_priv(dev);
2403 
2404 	cancel_work_sync(&priv->reset_task);
2405 	stop_gfar(dev);
2406 
2407 	/* Disconnect from the PHY */
2408 	phy_disconnect(priv->phydev);
2409 	priv->phydev = NULL;
2410 
2411 	gfar_free_irq(priv);
2412 
2413 	return 0;
2414 }
2415 
2416 /* Changes the mac address if the controller is not running. */
2417 static int gfar_set_mac_address(struct net_device *dev)
2418 {
2419 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2420 
2421 	return 0;
2422 }
2423 
2424 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2425 {
2426 	struct gfar_private *priv = netdev_priv(dev);
2427 	int frame_size = new_mtu + ETH_HLEN;
2428 
2429 	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2430 		netif_err(priv, drv, dev, "Invalid MTU setting\n");
2431 		return -EINVAL;
2432 	}
2433 
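	/* wait for any reset or reconfiguration in progress to finish */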
2434 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2435 		cpu_relax();
2436 
2437 	if (dev->flags & IFF_UP)
2438 		stop_gfar(dev);
2439 
2440 	dev->mtu = new_mtu;
2441 
2442 	if (dev->flags & IFF_UP)
2443 		startup_gfar(dev);
2444 
2445 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2446 
2447 	return 0;
2448 }
2449 
2450 void reset_gfar(struct net_device *ndev)
2451 {
2452 	struct gfar_private *priv = netdev_priv(ndev);
2453 
2454 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2455 		cpu_relax();
2456 
2457 	stop_gfar(ndev);
2458 	startup_gfar(ndev);
2459 
2460 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2461 }
2462 
2463 /* gfar_reset_task gets scheduled when a packet has not been
2464  * transmitted after a set amount of time.
2465  * For now, assume that clearing out all the structures, and
2466  * starting over will fix the problem.
2467  */
2468 static void gfar_reset_task(struct work_struct *work)
2469 {
2470 	struct gfar_private *priv = container_of(work, struct gfar_private,
2471 						 reset_task);
2472 	reset_gfar(priv->ndev);
2473 }
2474 
2475 static void gfar_timeout(struct net_device *dev)
2476 {
2477 	struct gfar_private *priv = netdev_priv(dev);
2478 
2479 	dev->stats.tx_errors++;
2480 	schedule_work(&priv->reset_task);
2481 }
2482 
2483 static void gfar_align_skb(struct sk_buff *skb)
2484 {
	/* The data buffer must be properly aligned.  Reserve as many
	 * bytes as needed to bring skb->data to an RXBUF_ALIGNMENT boundary
2487 	 */
2488 	skb_reserve(skb, RXBUF_ALIGNMENT -
2489 		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2490 }
2491 
2492 /* Interrupt Handler for Transmit complete */
2493 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2494 {
2495 	struct net_device *dev = tx_queue->dev;
2496 	struct netdev_queue *txq;
2497 	struct gfar_private *priv = netdev_priv(dev);
2498 	struct txbd8 *bdp, *next = NULL;
2499 	struct txbd8 *lbdp = NULL;
2500 	struct txbd8 *base = tx_queue->tx_bd_base;
2501 	struct sk_buff *skb;
2502 	int skb_dirtytx;
2503 	int tx_ring_size = tx_queue->tx_ring_size;
2504 	int frags = 0, nr_txbds = 0;
2505 	int i;
2506 	int howmany = 0;
2507 	int tqi = tx_queue->qindex;
2508 	unsigned int bytes_sent = 0;
2509 	u32 lstatus;
2510 	size_t buflen;
2511 
2512 	txq = netdev_get_tx_queue(dev, tqi);
2513 	bdp = tx_queue->dirty_tx;
2514 	skb_dirtytx = tx_queue->skb_dirtytx;
2515 
2516 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2517 		unsigned long flags;
2518 
2519 		frags = skb_shinfo(skb)->nr_frags;
2520 
2521 		/* When time stamping, one additional TxBD must be freed.
2522 		 * Also, we need to dma_unmap_single() the TxPAL.
2523 		 */
2524 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2525 			nr_txbds = frags + 2;
2526 		else
2527 			nr_txbds = frags + 1;
2528 
2529 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2530 
2531 		lstatus = lbdp->lstatus;
2532 
2533 		/* Only clean completed frames */
2534 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2535 		    (lstatus & BD_LENGTH_MASK))
2536 			break;
2537 
2538 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2539 			next = next_txbd(bdp, base, tx_ring_size);
2540 			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2541 		} else
2542 			buflen = bdp->length;
2543 
2544 		dma_unmap_single(priv->dev, bdp->bufPtr,
2545 				 buflen, DMA_TO_DEVICE);
2546 
2547 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2548 			struct skb_shared_hwtstamps shhwtstamps;
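			/* the hw places the Tx timestamp in the padding area
			 * preceding the frame; pick up the 8-byte-aligned
			 * word it occupies
			 */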
2549 			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2550 
2551 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2552 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2553 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2554 			skb_tstamp_tx(skb, &shhwtstamps);
2555 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2556 			bdp = next;
2557 		}
2558 
2559 		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2560 		bdp = next_txbd(bdp, base, tx_ring_size);
2561 
2562 		for (i = 0; i < frags; i++) {
2563 			dma_unmap_page(priv->dev, bdp->bufPtr,
2564 				       bdp->length, DMA_TO_DEVICE);
2565 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2566 			bdp = next_txbd(bdp, base, tx_ring_size);
2567 		}
2568 
2569 		bytes_sent += GFAR_CB(skb)->bytes_sent;
2570 
2571 		dev_kfree_skb_any(skb);
2572 
2573 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2574 
2575 		skb_dirtytx = (skb_dirtytx + 1) &
2576 			      TX_RING_MOD_MASK(tx_ring_size);
2577 
2578 		howmany++;
2579 		spin_lock_irqsave(&tx_queue->txlock, flags);
2580 		tx_queue->num_txbdfree += nr_txbds;
2581 		spin_unlock_irqrestore(&tx_queue->txlock, flags);
2582 	}
2583 
2584 	/* If we freed a buffer, we can restart transmission, if necessary */
2585 	if (tx_queue->num_txbdfree &&
2586 	    netif_tx_queue_stopped(txq) &&
2587 	    !(test_bit(GFAR_DOWN, &priv->state)))
2588 		netif_wake_subqueue(priv->ndev, tqi);
2589 
2590 	/* Update dirty indicators */
2591 	tx_queue->skb_dirtytx = skb_dirtytx;
2592 	tx_queue->dirty_tx = bdp;
2593 
2594 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2595 }
2596 
2597 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2598 			   struct sk_buff *skb)
2599 {
2600 	struct net_device *dev = rx_queue->dev;
2601 	struct gfar_private *priv = netdev_priv(dev);
2602 	dma_addr_t buf;
2603 
2604 	buf = dma_map_single(priv->dev, skb->data,
2605 			     priv->rx_buffer_size, DMA_FROM_DEVICE);
2606 	gfar_init_rxbdp(rx_queue, bdp, buf);
2607 }
2608 
2609 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2610 {
2611 	struct gfar_private *priv = netdev_priv(dev);
2612 	struct sk_buff *skb;
2613 
2614 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2615 	if (!skb)
2616 		return NULL;
2617 
2618 	gfar_align_skb(skb);
2619 
2620 	return skb;
2621 }
2622 
2623 struct sk_buff *gfar_new_skb(struct net_device *dev)
2624 {
2625 	return gfar_alloc_skb(dev);
2626 }
2627 
2628 static inline void count_errors(unsigned short status, struct net_device *dev)
2629 {
2630 	struct gfar_private *priv = netdev_priv(dev);
2631 	struct net_device_stats *stats = &dev->stats;
2632 	struct gfar_extra_stats *estats = &priv->extra_stats;
2633 
2634 	/* If the packet was truncated, none of the other errors matter */
2635 	if (status & RXBD_TRUNCATED) {
2636 		stats->rx_length_errors++;
2637 
2638 		atomic64_inc(&estats->rx_trunc);
2639 
2640 		return;
2641 	}
2642 	/* Count the errors, if there were any */
2643 	if (status & (RXBD_LARGE | RXBD_SHORT)) {
2644 		stats->rx_length_errors++;
2645 
2646 		if (status & RXBD_LARGE)
2647 			atomic64_inc(&estats->rx_large);
2648 		else
2649 			atomic64_inc(&estats->rx_short);
2650 	}
2651 	if (status & RXBD_NONOCTET) {
2652 		stats->rx_frame_errors++;
2653 		atomic64_inc(&estats->rx_nonoctet);
2654 	}
2655 	if (status & RXBD_CRCERR) {
2656 		atomic64_inc(&estats->rx_crcerr);
2657 		stats->rx_crc_errors++;
2658 	}
2659 	if (status & RXBD_OVERRUN) {
2660 		atomic64_inc(&estats->rx_overrun);
2661 		stats->rx_crc_errors++;
2662 	}
2663 }
2664 
2665 irqreturn_t gfar_receive(int irq, void *grp_id)
2666 {
2667 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2668 	unsigned long flags;
2669 	u32 imask;
2670 
2671 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2672 		spin_lock_irqsave(&grp->grplock, flags);
2673 		imask = gfar_read(&grp->regs->imask);
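		/* mask further Rx interrupts; the NAPI Rx poll re-enables
		 * them once the ring has been serviced
		 */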
2674 		imask &= IMASK_RX_DISABLED;
2675 		gfar_write(&grp->regs->imask, imask);
2676 		spin_unlock_irqrestore(&grp->grplock, flags);
2677 		__napi_schedule(&grp->napi_rx);
2678 	} else {
2679 		/* Clear IEVENT, so interrupts aren't called again
2680 		 * because of the packets that have already arrived.
2681 		 */
2682 		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2683 	}
2684 
2685 	return IRQ_HANDLED;
2686 }
2687 
2688 /* Interrupt Handler for Transmit complete */
2689 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2690 {
2691 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2692 	unsigned long flags;
2693 	u32 imask;
2694 
2695 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2696 		spin_lock_irqsave(&grp->grplock, flags);
2697 		imask = gfar_read(&grp->regs->imask);
2698 		imask &= IMASK_TX_DISABLED;
2699 		gfar_write(&grp->regs->imask, imask);
2700 		spin_unlock_irqrestore(&grp->grplock, flags);
2701 		__napi_schedule(&grp->napi_tx);
2702 	} else {
2703 		/* Clear IEVENT, so interrupts aren't called again
2704 		 * because of the packets that have already arrived.
2705 		 */
2706 		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2707 	}
2708 
2709 	return IRQ_HANDLED;
2710 }
2711 
2712 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2713 {
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, leave the skb
	 * unverified so the stack checks the checksum itself.
2717 	 */
2718 	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2719 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2720 	else
2721 		skb_checksum_none_assert(skb);
2722 }
2723 
2724 
2725 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2726 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2727 			       int amount_pull, struct napi_struct *napi)
2728 {
2729 	struct gfar_private *priv = netdev_priv(dev);
2730 	struct rxfcb *fcb = NULL;
2731 
2732 	/* fcb is at the beginning if exists */
2733 	fcb = (struct rxfcb *)skb->data;
2734 
	/* Remove the FCB from the skb; the padding
	 * bytes, if any, are removed further below
2737 	 */
2738 	if (amount_pull) {
2739 		skb_record_rx_queue(skb, fcb->rq);
2740 		skb_pull(skb, amount_pull);
2741 	}
2742 
2743 	/* Get receive timestamp from the skb */
2744 	if (priv->hwts_rx_en) {
2745 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
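		/* the hw-inserted Rx timestamp occupies the first 8 bytes of
		 * the remaining data and is stripped together with
		 * priv->padding below
		 */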
2746 		u64 *ns = (u64 *) skb->data;
2747 
2748 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2749 		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2750 	}
2751 
2752 	if (priv->padding)
2753 		skb_pull(skb, priv->padding);
2754 
2755 	if (dev->features & NETIF_F_RXCSUM)
2756 		gfar_rx_checksum(skb, fcb);
2757 
2758 	/* Tell the skb what kind of packet this is */
2759 	skb->protocol = eth_type_trans(skb, dev);
2760 
	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo-randomly set.
2764 	 */
2765 	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2766 	    fcb->flags & RXFCB_VLN)
2767 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
2768 
2769 	/* Send the packet up the stack */
2770 	napi_gro_receive(napi, skb);
2771 
2772 }
2773 
2774 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2775  * until the budget/quota has been reached. Returns the number
2776  * of frames handled
2777  */
2778 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2779 {
2780 	struct net_device *dev = rx_queue->dev;
2781 	struct rxbd8 *bdp, *base;
2782 	struct sk_buff *skb;
2783 	int pkt_len;
2784 	int amount_pull;
2785 	int howmany = 0;
2786 	struct gfar_private *priv = netdev_priv(dev);
2787 
2788 	/* Get the first full descriptor */
2789 	bdp = rx_queue->cur_rx;
2790 	base = rx_queue->rx_bd_base;
2791 
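	/* an Rx FCB is prepended to each frame when uses_rxfcb is set and
	 * must be pulled off before the frame is passed up the stack
	 */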
2792 	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2793 
2794 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2795 		struct sk_buff *newskb;
2796 
2797 		rmb();
2798 
2799 		/* Add another skb for the future */
2800 		newskb = gfar_new_skb(dev);
2801 
2802 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2803 
2804 		dma_unmap_single(priv->dev, bdp->bufPtr,
2805 				 priv->rx_buffer_size, DMA_FROM_DEVICE);
2806 
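		/* frames that overflow the Rx buffer but were not flagged by
		 * the hw are marked RXBD_LARGE so they get dropped below
		 */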
2807 		if (unlikely(!(bdp->status & RXBD_ERR) &&
2808 			     bdp->length > priv->rx_buffer_size))
2809 			bdp->status = RXBD_LARGE;
2810 
2811 		/* We drop the frame if we failed to allocate a new buffer */
2812 		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2813 			     bdp->status & RXBD_ERR)) {
2814 			count_errors(bdp->status, dev);
2815 
2816 			if (unlikely(!newskb))
2817 				newskb = skb;
2818 			else if (skb)
2819 				dev_kfree_skb(skb);
2820 		} else {
2821 			/* Increment the number of packets */
2822 			rx_queue->stats.rx_packets++;
2823 			howmany++;
2824 
2825 			if (likely(skb)) {
2826 				pkt_len = bdp->length - ETH_FCS_LEN;
2827 				/* Remove the FCS from the packet length */
2828 				skb_put(skb, pkt_len);
2829 				rx_queue->stats.rx_bytes += pkt_len;
2830 				skb_record_rx_queue(skb, rx_queue->qindex);
2831 				gfar_process_frame(dev, skb, amount_pull,
2832 						   &rx_queue->grp->napi_rx);
2833 
2834 			} else {
2835 				netif_warn(priv, rx_err, dev, "Missing skb!\n");
2836 				rx_queue->stats.rx_dropped++;
2837 				atomic64_inc(&priv->extra_stats.rx_skbmissing);
2838 			}
2839 
2840 		}
2841 
2842 		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2843 
2844 		/* Setup the new bdp */
2845 		gfar_new_rxbdp(rx_queue, bdp, newskb);
2846 
2847 		/* Update to the next pointer */
2848 		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2849 
2850 		/* update to point at the next skb */
2851 		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2852 				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2853 	}
2854 
2855 	/* Update the current rxbd pointer to be the next one */
2856 	rx_queue->cur_rx = bdp;
2857 
2858 	return howmany;
2859 }
2860 
2861 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2862 {
2863 	struct gfar_priv_grp *gfargrp =
2864 		container_of(napi, struct gfar_priv_grp, napi_rx);
2865 	struct gfar __iomem *regs = gfargrp->regs;
2866 	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2867 	int work_done = 0;
2868 
2869 	/* Clear IEVENT, so interrupts aren't called again
2870 	 * because of the packets that have already arrived
2871 	 */
2872 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2873 
2874 	work_done = gfar_clean_rx_ring(rx_queue, budget);
2875 
2876 	if (work_done < budget) {
2877 		u32 imask;
2878 		napi_complete(napi);
2879 		/* Clear the halt bit in RSTAT */
2880 		gfar_write(&regs->rstat, gfargrp->rstat);
2881 
2882 		spin_lock_irq(&gfargrp->grplock);
2883 		imask = gfar_read(&regs->imask);
2884 		imask |= IMASK_RX_DEFAULT;
2885 		gfar_write(&regs->imask, imask);
2886 		spin_unlock_irq(&gfargrp->grplock);
2887 	}
2888 
2889 	return work_done;
2890 }
2891 
2892 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2893 {
2894 	struct gfar_priv_grp *gfargrp =
2895 		container_of(napi, struct gfar_priv_grp, napi_tx);
2896 	struct gfar __iomem *regs = gfargrp->regs;
2897 	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2898 	u32 imask;
2899 
2900 	/* Clear IEVENT, so interrupts aren't called again
2901 	 * because of the packets that have already arrived
2902 	 */
2903 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2904 
2905 	/* run Tx cleanup to completion */
2906 	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2907 		gfar_clean_tx_ring(tx_queue);
2908 
2909 	napi_complete(napi);
2910 
2911 	spin_lock_irq(&gfargrp->grplock);
2912 	imask = gfar_read(&regs->imask);
2913 	imask |= IMASK_TX_DEFAULT;
2914 	gfar_write(&regs->imask, imask);
2915 	spin_unlock_irq(&gfargrp->grplock);
2916 
2917 	return 0;
2918 }
2919 
2920 static int gfar_poll_rx(struct napi_struct *napi, int budget)
2921 {
2922 	struct gfar_priv_grp *gfargrp =
2923 		container_of(napi, struct gfar_priv_grp, napi_rx);
2924 	struct gfar_private *priv = gfargrp->priv;
2925 	struct gfar __iomem *regs = gfargrp->regs;
2926 	struct gfar_priv_rx_q *rx_queue = NULL;
2927 	int work_done = 0, work_done_per_q = 0;
2928 	int i, budget_per_q = 0;
2929 	unsigned long rstat_rxf;
2930 	int num_act_queues;
2931 
2932 	/* Clear IEVENT, so interrupts aren't called again
2933 	 * because of the packets that have already arrived
2934 	 */
2935 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2936 
2937 	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2938 
2939 	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2940 	if (num_act_queues)
2941 		budget_per_q = budget/num_act_queues;
2942 
2943 	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2944 		/* skip queue if not active */
2945 		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2946 			continue;
2947 
2948 		rx_queue = priv->rx_queue[i];
2949 		work_done_per_q =
2950 			gfar_clean_rx_ring(rx_queue, budget_per_q);
2951 		work_done += work_done_per_q;
2952 
2953 		/* finished processing this queue */
2954 		if (work_done_per_q < budget_per_q) {
2955 			/* clear active queue hw indication */
2956 			gfar_write(&regs->rstat,
2957 				   RSTAT_CLEAR_RXF0 >> i);
2958 			num_act_queues--;
2959 
2960 			if (!num_act_queues)
2961 				break;
2962 		}
2963 	}
2964 
2965 	if (!num_act_queues) {
2966 		u32 imask;
2967 		napi_complete(napi);
2968 
2969 		/* Clear the halt bit in RSTAT */
2970 		gfar_write(&regs->rstat, gfargrp->rstat);
2971 
2972 		spin_lock_irq(&gfargrp->grplock);
2973 		imask = gfar_read(&regs->imask);
2974 		imask |= IMASK_RX_DEFAULT;
2975 		gfar_write(&regs->imask, imask);
2976 		spin_unlock_irq(&gfargrp->grplock);
2977 	}
2978 
2979 	return work_done;
2980 }
2981 
2982 static int gfar_poll_tx(struct napi_struct *napi, int budget)
2983 {
2984 	struct gfar_priv_grp *gfargrp =
2985 		container_of(napi, struct gfar_priv_grp, napi_tx);
2986 	struct gfar_private *priv = gfargrp->priv;
2987 	struct gfar __iomem *regs = gfargrp->regs;
2988 	struct gfar_priv_tx_q *tx_queue = NULL;
2989 	int has_tx_work = 0;
2990 	int i;
2991 
2992 	/* Clear IEVENT, so interrupts aren't called again
2993 	 * because of the packets that have already arrived
2994 	 */
2995 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2996 
2997 	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2998 		tx_queue = priv->tx_queue[i];
2999 		/* run Tx cleanup to completion */
3000 		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3001 			gfar_clean_tx_ring(tx_queue);
3002 			has_tx_work = 1;
3003 		}
3004 	}
3005 
3006 	if (!has_tx_work) {
3007 		u32 imask;
3008 		napi_complete(napi);
3009 
3010 		spin_lock_irq(&gfargrp->grplock);
3011 		imask = gfar_read(&regs->imask);
3012 		imask |= IMASK_TX_DEFAULT;
3013 		gfar_write(&regs->imask, imask);
3014 		spin_unlock_irq(&gfargrp->grplock);
3015 	}
3016 
3017 	return 0;
3018 }
3019 
3020 
3021 #ifdef CONFIG_NET_POLL_CONTROLLER
3022 /* Polling 'interrupt' - used by things like netconsole to send skbs
3023  * without having to re-enable interrupts. It's not called while
3024  * the interrupt routine is executing.
3025  */
3026 static void gfar_netpoll(struct net_device *dev)
3027 {
3028 	struct gfar_private *priv = netdev_priv(dev);
3029 	int i;
3030 
3031 	/* If the device has multiple interrupts, run tx/rx */
3032 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3033 		for (i = 0; i < priv->num_grps; i++) {
3034 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3035 
3036 			disable_irq(gfar_irq(grp, TX)->irq);
3037 			disable_irq(gfar_irq(grp, RX)->irq);
3038 			disable_irq(gfar_irq(grp, ER)->irq);
3039 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3040 			enable_irq(gfar_irq(grp, ER)->irq);
3041 			enable_irq(gfar_irq(grp, RX)->irq);
3042 			enable_irq(gfar_irq(grp, TX)->irq);
3043 		}
3044 	} else {
3045 		for (i = 0; i < priv->num_grps; i++) {
3046 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3047 
3048 			disable_irq(gfar_irq(grp, TX)->irq);
3049 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3050 			enable_irq(gfar_irq(grp, TX)->irq);
3051 		}
3052 	}
3053 }
3054 #endif
3055 
3056 /* The interrupt handler for devices with one interrupt */
3057 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3058 {
3059 	struct gfar_priv_grp *gfargrp = grp_id;
3060 
3061 	/* Save ievent for future reference */
3062 	u32 events = gfar_read(&gfargrp->regs->ievent);
3063 
3064 	/* Check for reception */
3065 	if (events & IEVENT_RX_MASK)
3066 		gfar_receive(irq, grp_id);
3067 
3068 	/* Check for transmit completion */
3069 	if (events & IEVENT_TX_MASK)
3070 		gfar_transmit(irq, grp_id);
3071 
3072 	/* Check for errors */
3073 	if (events & IEVENT_ERR_MASK)
3074 		gfar_error(irq, grp_id);
3075 
3076 	return IRQ_HANDLED;
3077 }
3078 
3079 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3080 {
3081 	struct phy_device *phydev = priv->phydev;
3082 	u32 val = 0;
3083 
3084 	if (!phydev->duplex)
3085 		return val;
3086 
3087 	if (!priv->pause_aneg_en) {
3088 		if (priv->tx_pause_en)
3089 			val |= MACCFG1_TX_FLOW;
3090 		if (priv->rx_pause_en)
3091 			val |= MACCFG1_RX_FLOW;
3092 	} else {
3093 		u16 lcl_adv, rmt_adv;
3094 		u8 flowctrl;
3095 		/* get link partner capabilities */
3096 		rmt_adv = 0;
3097 		if (phydev->pause)
3098 			rmt_adv = LPA_PAUSE_CAP;
3099 		if (phydev->asym_pause)
3100 			rmt_adv |= LPA_PAUSE_ASYM;
3101 
3102 		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3103 
3104 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3105 		if (flowctrl & FLOW_CTRL_TX)
3106 			val |= MACCFG1_TX_FLOW;
3107 		if (flowctrl & FLOW_CTRL_RX)
3108 			val |= MACCFG1_RX_FLOW;
3109 	}
3110 
3111 	return val;
3112 }
3113 
3114 /* Called every time the controller might need to be made
3115  * aware of new link state.  The PHY code conveys this
3116  * information through variables in the phydev structure, and this
3117  * function converts those variables into the appropriate
3118  * register values, and can bring down the device if needed.
3119  */
3120 static void adjust_link(struct net_device *dev)
3121 {
3122 	struct gfar_private *priv = netdev_priv(dev);
3123 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3124 	struct phy_device *phydev = priv->phydev;
3125 	int new_state = 0;
3126 
3127 	if (test_bit(GFAR_RESETTING, &priv->state))
3128 		return;
3129 
3130 	if (phydev->link) {
3131 		u32 tempval1 = gfar_read(&regs->maccfg1);
3132 		u32 tempval = gfar_read(&regs->maccfg2);
3133 		u32 ecntrl = gfar_read(&regs->ecntrl);
3134 
3135 		/* Now we make sure that we can be in full duplex mode.
3136 		 * If not, we operate in half-duplex mode.
3137 		 */
3138 		if (phydev->duplex != priv->oldduplex) {
3139 			new_state = 1;
3140 			if (!(phydev->duplex))
3141 				tempval &= ~(MACCFG2_FULL_DUPLEX);
3142 			else
3143 				tempval |= MACCFG2_FULL_DUPLEX;
3144 
3145 			priv->oldduplex = phydev->duplex;
3146 		}
3147 
3148 		if (phydev->speed != priv->oldspeed) {
3149 			new_state = 1;
3150 			switch (phydev->speed) {
3151 			case 1000:
3152 				tempval =
3153 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3154 
3155 				ecntrl &= ~(ECNTRL_R100);
3156 				break;
3157 			case 100:
3158 			case 10:
3159 				tempval =
3160 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3161 
3162 				/* Reduced mode distinguishes
3163 				 * between 10 and 100
3164 				 */
3165 				if (phydev->speed == SPEED_100)
3166 					ecntrl |= ECNTRL_R100;
3167 				else
3168 					ecntrl &= ~(ECNTRL_R100);
3169 				break;
3170 			default:
3171 				netif_warn(priv, link, dev,
3172 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
3173 					   phydev->speed);
3174 				break;
3175 			}
3176 
3177 			priv->oldspeed = phydev->speed;
3178 		}
3179 
3180 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3181 		tempval1 |= gfar_get_flowctrl_cfg(priv);
3182 
3183 		gfar_write(&regs->maccfg1, tempval1);
3184 		gfar_write(&regs->maccfg2, tempval);
3185 		gfar_write(&regs->ecntrl, ecntrl);
3186 
3187 		if (!priv->oldlink) {
3188 			new_state = 1;
3189 			priv->oldlink = 1;
3190 		}
3191 	} else if (priv->oldlink) {
3192 		new_state = 1;
3193 		priv->oldlink = 0;
3194 		priv->oldspeed = 0;
3195 		priv->oldduplex = -1;
3196 	}
3197 
3198 	if (new_state && netif_msg_link(priv))
3199 		phy_print_status(phydev);
3200 }
3201 
3202 /* Update the hash table based on the current list of multicast
3203  * addresses we subscribe to.  Also, change the promiscuity of
3204  * the device based on the flags (this function is called
 * whenever dev->flags is changed)
3206  */
3207 static void gfar_set_multi(struct net_device *dev)
3208 {
3209 	struct netdev_hw_addr *ha;
3210 	struct gfar_private *priv = netdev_priv(dev);
3211 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3212 	u32 tempval;
3213 
3214 	if (dev->flags & IFF_PROMISC) {
3215 		/* Set RCTRL to PROM */
3216 		tempval = gfar_read(&regs->rctrl);
3217 		tempval |= RCTRL_PROM;
3218 		gfar_write(&regs->rctrl, tempval);
3219 	} else {
3220 		/* Set RCTRL to not PROM */
3221 		tempval = gfar_read(&regs->rctrl);
3222 		tempval &= ~(RCTRL_PROM);
3223 		gfar_write(&regs->rctrl, tempval);
3224 	}
3225 
3226 	if (dev->flags & IFF_ALLMULTI) {
3227 		/* Set the hash to rx all multicast frames */
3228 		gfar_write(&regs->igaddr0, 0xffffffff);
3229 		gfar_write(&regs->igaddr1, 0xffffffff);
3230 		gfar_write(&regs->igaddr2, 0xffffffff);
3231 		gfar_write(&regs->igaddr3, 0xffffffff);
3232 		gfar_write(&regs->igaddr4, 0xffffffff);
3233 		gfar_write(&regs->igaddr5, 0xffffffff);
3234 		gfar_write(&regs->igaddr6, 0xffffffff);
3235 		gfar_write(&regs->igaddr7, 0xffffffff);
3236 		gfar_write(&regs->gaddr0, 0xffffffff);
3237 		gfar_write(&regs->gaddr1, 0xffffffff);
3238 		gfar_write(&regs->gaddr2, 0xffffffff);
3239 		gfar_write(&regs->gaddr3, 0xffffffff);
3240 		gfar_write(&regs->gaddr4, 0xffffffff);
3241 		gfar_write(&regs->gaddr5, 0xffffffff);
3242 		gfar_write(&regs->gaddr6, 0xffffffff);
3243 		gfar_write(&regs->gaddr7, 0xffffffff);
3244 	} else {
3245 		int em_num;
3246 		int idx;
3247 
3248 		/* zero out the hash */
3249 		gfar_write(&regs->igaddr0, 0x0);
3250 		gfar_write(&regs->igaddr1, 0x0);
3251 		gfar_write(&regs->igaddr2, 0x0);
3252 		gfar_write(&regs->igaddr3, 0x0);
3253 		gfar_write(&regs->igaddr4, 0x0);
3254 		gfar_write(&regs->igaddr5, 0x0);
3255 		gfar_write(&regs->igaddr6, 0x0);
3256 		gfar_write(&regs->igaddr7, 0x0);
3257 		gfar_write(&regs->gaddr0, 0x0);
3258 		gfar_write(&regs->gaddr1, 0x0);
3259 		gfar_write(&regs->gaddr2, 0x0);
3260 		gfar_write(&regs->gaddr3, 0x0);
3261 		gfar_write(&regs->gaddr4, 0x0);
3262 		gfar_write(&regs->gaddr5, 0x0);
3263 		gfar_write(&regs->gaddr6, 0x0);
3264 		gfar_write(&regs->gaddr7, 0x0);
3265 
3266 		/* If we have extended hash tables, we need to
3267 		 * clear the exact match registers to prepare for
3268 		 * setting them
3269 		 */
3270 		if (priv->extended_hash) {
3271 			em_num = GFAR_EM_NUM + 1;
3272 			gfar_clear_exact_match(dev);
3273 			idx = 1;
3274 		} else {
3275 			idx = 0;
3276 			em_num = 0;
3277 		}
3278 
3279 		if (netdev_mc_empty(dev))
3280 			return;
3281 
3282 		/* Parse the list, and set the appropriate bits */
3283 		netdev_for_each_mc_addr(ha, dev) {
3284 			if (idx < em_num) {
3285 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3286 				idx++;
3287 			} else
3288 				gfar_set_hash_for_addr(dev, ha->addr);
3289 		}
3290 	}
3291 }
3292 
3293 
3294 /* Clears each of the exact match registers to zero, so they
3295  * don't interfere with normal reception
3296  */
3297 static void gfar_clear_exact_match(struct net_device *dev)
3298 {
3299 	int idx;
3300 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3301 
3302 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3303 		gfar_set_mac_for_addr(dev, idx, zero_arr);
3304 }
3305 
3306 /* Set the appropriate hash bit for the given addr */
3307 /* The algorithm works like so:
3308  * 1) Take the Destination Address (ie the multicast address), and
3309  * do a CRC on it (little endian), and reverse the bits of the
3310  * result.
3311  * 2) Use the 8 most significant bits as a hash into a 256-entry
3312  * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other bits
3316  * indicate which bit (assuming an IBM numbering scheme, which
3317  * for PowerPC (tm) is usually the case) in the register holds
3318  * the entry.
3319  */
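/* For illustration (a sketch, assuming hash_width == 8, i.e. the plain
 * 256-entry gaddr table): if the top byte of ether_crc()'s result is
 * 0xb4 (binary 101 10100), the top 3 bits select gaddr5 and the next
 * 5 bits give hash bit 20, so bit (1 << (31 - 20)) is set in gaddr5.
 */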
3320 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3321 {
3322 	u32 tempval;
3323 	struct gfar_private *priv = netdev_priv(dev);
3324 	u32 result = ether_crc(ETH_ALEN, addr);
3325 	int width = priv->hash_width;
3326 	u8 whichbit = (result >> (32 - width)) & 0x1f;
3327 	u8 whichreg = result >> (32 - width + 5);
3328 	u32 value = (1 << (31-whichbit));
3329 
3330 	tempval = gfar_read(priv->hash_regs[whichreg]);
3331 	tempval |= value;
3332 	gfar_write(priv->hash_regs[whichreg], tempval);
3333 }
3334 
3335 
3336 /* There are multiple MAC Address register pairs on some controllers
 * This function sets the num'th pair to a given address
3338  */
3339 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3340 				  const u8 *addr)
3341 {
3342 	struct gfar_private *priv = netdev_priv(dev);
3343 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3344 	int idx;
3345 	char tmpbuf[ETH_ALEN];
3346 	u32 tempval;
3347 	u32 __iomem *macptr = &regs->macstnaddr1;
3348 
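	/* each MAC address occupies a pair of consecutive 32-bit registers */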
3349 	macptr += num*2;
3350 
	/* Now copy it into the MAC registers backwards, since the
	 * hardware expects the address bytes in reverse order
3353 	 */
3354 	for (idx = 0; idx < ETH_ALEN; idx++)
3355 		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3356 
3357 	gfar_write(macptr, *((u32 *) (tmpbuf)));
3358 
3359 	tempval = *((u32 *) (tmpbuf + 4));
3360 
3361 	gfar_write(macptr+1, tempval);
3362 }
3363 
3364 /* GFAR error interrupt handler */
3365 static irqreturn_t gfar_error(int irq, void *grp_id)
3366 {
3367 	struct gfar_priv_grp *gfargrp = grp_id;
3368 	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
3370 	struct net_device *dev = priv->ndev;
3371 
3372 	/* Save ievent for future reference */
3373 	u32 events = gfar_read(&regs->ievent);
3374 
3375 	/* Clear IEVENT */
3376 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3377 
3378 	/* Magic Packet is not an error. */
3379 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3380 	    (events & IEVENT_MAG))
3381 		events &= ~IEVENT_MAG;
3382 
	/* Log the error details if Rx/Tx error messages are enabled */
3384 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3385 		netdev_dbg(dev,
3386 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3387 			   events, gfar_read(&regs->imask));
3388 
3389 	/* Update the error counters */
3390 	if (events & IEVENT_TXE) {
3391 		dev->stats.tx_errors++;
3392 
3393 		if (events & IEVENT_LC)
3394 			dev->stats.tx_window_errors++;
3395 		if (events & IEVENT_CRL)
3396 			dev->stats.tx_aborted_errors++;
3397 		if (events & IEVENT_XFUN) {
3398 			unsigned long flags;
3399 
3400 			netif_dbg(priv, tx_err, dev,
3401 				  "TX FIFO underrun, packet dropped\n");
3402 			dev->stats.tx_dropped++;
3403 			atomic64_inc(&priv->extra_stats.tx_underrun);
3404 
3405 			local_irq_save(flags);
3406 			lock_tx_qs(priv);
3407 
3408 			/* Reactivate the Tx Queues */
3409 			gfar_write(&regs->tstat, gfargrp->tstat);
3410 
3411 			unlock_tx_qs(priv);
3412 			local_irq_restore(flags);
3413 		}
3414 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3415 	}
3416 	if (events & IEVENT_BSY) {
3417 		dev->stats.rx_errors++;
3418 		atomic64_inc(&priv->extra_stats.rx_bsy);
3419 
3420 		gfar_receive(irq, grp_id);
3421 
3422 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3423 			  gfar_read(&regs->rstat));
3424 	}
3425 	if (events & IEVENT_BABR) {
3426 		dev->stats.rx_errors++;
3427 		atomic64_inc(&priv->extra_stats.rx_babr);
3428 
3429 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3430 	}
3431 	if (events & IEVENT_EBERR) {
3432 		atomic64_inc(&priv->extra_stats.eberr);
3433 		netif_dbg(priv, rx_err, dev, "bus error\n");
3434 	}
3435 	if (events & IEVENT_RXC)
3436 		netif_dbg(priv, rx_status, dev, "control frame\n");
3437 
3438 	if (events & IEVENT_BABT) {
3439 		atomic64_inc(&priv->extra_stats.tx_babt);
3440 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3441 	}
3442 	return IRQ_HANDLED;
3443 }
3444 
3445 static struct of_device_id gfar_match[] =
3446 {
3447 	{
3448 		.type = "network",
3449 		.compatible = "gianfar",
3450 	},
3451 	{
3452 		.compatible = "fsl,etsec2",
3453 	},
3454 	{},
3455 };
3456 MODULE_DEVICE_TABLE(of, gfar_match);
3457 
3458 /* Structure for a device driver */
3459 static struct platform_driver gfar_driver = {
3460 	.driver = {
3461 		.name = "fsl-gianfar",
3462 		.owner = THIS_MODULE,
3463 		.pm = GFAR_PM_OPS,
3464 		.of_match_table = gfar_match,
3465 	},
3466 	.probe = gfar_probe,
3467 	.remove = gfar_remove,
3468 };
3469 
3470 module_platform_driver(gfar_driver);
3471