1 /* drivers/net/ethernet/freescale/gianfar.c
2  *
3  * Gianfar Ethernet Driver
4  * This driver is designed for the non-CPM ethernet controllers
5  * on the 85xx and 83xx family of integrated processors
6  * Based on 8260_io/fcc_enet.c
7  *
8  * Author: Andy Fleming
9  * Maintainer: Kumar Gala
10  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11  *
12  * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
13  * Copyright 2007 MontaVista Software, Inc.
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  *  Gianfar:  AKA Lambda Draconis, "Dragon"
21  *  RA 11 31 24.2
22  *  Dec +69 19 52
23  *  V 3.84
24  *  B-V +1.62
25  *
26  *  Theory of operation
27  *
28  *  The driver is initialized through of_device. Configuration information
29  *  is therefore conveyed through an OF-style device tree.
30  *
31  *  The Gianfar Ethernet Controller uses a ring of buffer
32  *  descriptors.  The beginning is indicated by a register
33  *  pointing to the physical address of the start of the ring.
34  *  The end is determined by a "wrap" bit being set in the
35  *  last descriptor of the ring.
36  *
37  *  When a packet is received, the RXF bit in the
38  *  IEVENT register is set, triggering an interrupt when the
39  *  corresponding bit in the IMASK register is also set (if
40  *  interrupt coalescing is active, then the interrupt may not
41  *  happen immediately, but will wait until either a set number
42  *  of frames or amount of time has passed).  In NAPI, the
43  *  interrupt handler will signal there is work to be done, and
44  *  exit. The NAPI poll routine will then start at the last known empty
45  *  descriptor, and process every subsequent descriptor until there
46  *  are none left with data (NAPI will stop after a set number of
47  *  packets to give time to other tasks, but will eventually
48  *  process all the packets).  The data arrives inside a
49  *  pre-allocated skb, and so after the skb is passed up to the
50  *  stack, a new skb must be allocated, and the address field in
51  *  the buffer descriptor must be updated to indicate this new
52  *  skb.
53  *
54  *  When the kernel requests that a packet be transmitted, the
55  *  driver starts where it left off last time, and points the
56  *  descriptor at the buffer which was passed in.  The driver
57  *  then informs the DMA engine that there are packets ready to
58  *  be transmitted.  Once the controller is finished transmitting
59  *  the packet, an interrupt may be triggered (under the same
60  *  conditions as for reception, but depending on the TXF bit).
61  *  The driver then cleans up the buffer.
62  */
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 #define DEBUG
66 
67 #include <linux/kernel.h>
68 #include <linux/string.h>
69 #include <linux/errno.h>
70 #include <linux/unistd.h>
71 #include <linux/slab.h>
72 #include <linux/interrupt.h>
73 #include <linux/init.h>
74 #include <linux/delay.h>
75 #include <linux/netdevice.h>
76 #include <linux/etherdevice.h>
77 #include <linux/skbuff.h>
78 #include <linux/if_vlan.h>
79 #include <linux/spinlock.h>
80 #include <linux/mm.h>
81 #include <linux/of_mdio.h>
82 #include <linux/of_platform.h>
83 #include <linux/ip.h>
84 #include <linux/tcp.h>
85 #include <linux/udp.h>
86 #include <linux/in.h>
87 #include <linux/net_tstamp.h>
88 
89 #include <asm/io.h>
90 #include <asm/reg.h>
91 #include <asm/irq.h>
92 #include <asm/uaccess.h>
93 #include <linux/module.h>
94 #include <linux/dma-mapping.h>
95 #include <linux/crc32.h>
96 #include <linux/mii.h>
97 #include <linux/phy.h>
98 #include <linux/phy_fixed.h>
99 #include <linux/of.h>
100 #include <linux/of_net.h>
101 
102 #include "gianfar.h"
103 
104 #define TX_TIMEOUT      (1*HZ)
105 
106 const char gfar_driver_version[] = "1.3";
107 
108 static int gfar_enet_open(struct net_device *dev);
109 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
110 static void gfar_reset_task(struct work_struct *work);
111 static void gfar_timeout(struct net_device *dev);
112 static int gfar_close(struct net_device *dev);
113 struct sk_buff *gfar_new_skb(struct net_device *dev);
114 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
115 			   struct sk_buff *skb);
116 static int gfar_set_mac_address(struct net_device *dev);
117 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
118 static irqreturn_t gfar_error(int irq, void *dev_id);
119 static irqreturn_t gfar_transmit(int irq, void *dev_id);
120 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
121 static void adjust_link(struct net_device *dev);
122 static void init_registers(struct net_device *dev);
123 static int init_phy(struct net_device *dev);
124 static int gfar_probe(struct platform_device *ofdev);
125 static int gfar_remove(struct platform_device *ofdev);
126 static void free_skb_resources(struct gfar_private *priv);
127 static void gfar_set_multi(struct net_device *dev);
128 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
129 static void gfar_configure_serdes(struct net_device *dev);
130 static int gfar_poll(struct napi_struct *napi, int budget);
131 #ifdef CONFIG_NET_POLL_CONTROLLER
132 static void gfar_netpoll(struct net_device *dev);
133 #endif
134 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
135 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
136 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
137 			      int amount_pull, struct napi_struct *napi);
138 void gfar_halt(struct net_device *dev);
139 static void gfar_halt_nodisable(struct net_device *dev);
140 void gfar_start(struct net_device *dev);
141 static void gfar_clear_exact_match(struct net_device *dev);
142 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
143 				  const u8 *addr);
144 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
145 
146 MODULE_AUTHOR("Freescale Semiconductor, Inc");
147 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148 MODULE_LICENSE("GPL");
149 
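/* Initialize a single Rx buffer descriptor: point it at the given DMA buffer,
 * mark it empty (with interrupt on completion), and set the wrap bit if it is
 * the last descriptor in the ring.  The eieio() orders the buffer pointer
 * write before the status update that hands the descriptor to the controller.
 */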
150 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
151 			    dma_addr_t buf)
152 {
153 	u32 lstatus;
154 
155 	bdp->bufPtr = buf;
156 
157 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
158 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
159 		lstatus |= BD_LFLAG(RXBD_WRAP);
160 
161 	eieio();
162 
163 	bdp->lstatus = lstatus;
164 }
165 
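/* Bring all Tx and Rx buffer descriptor rings back to their initial state,
 * allocating fresh Rx skbs for any descriptors that don't already have one.
 */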
166 static int gfar_init_bds(struct net_device *ndev)
167 {
168 	struct gfar_private *priv = netdev_priv(ndev);
169 	struct gfar_priv_tx_q *tx_queue = NULL;
170 	struct gfar_priv_rx_q *rx_queue = NULL;
171 	struct txbd8 *txbdp;
172 	struct rxbd8 *rxbdp;
173 	int i, j;
174 
175 	for (i = 0; i < priv->num_tx_queues; i++) {
176 		tx_queue = priv->tx_queue[i];
177 		/* Initialize some variables in our dev structure */
178 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
179 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
180 		tx_queue->cur_tx = tx_queue->tx_bd_base;
181 		tx_queue->skb_curtx = 0;
182 		tx_queue->skb_dirtytx = 0;
183 
184 		/* Initialize Transmit Descriptor Ring */
185 		txbdp = tx_queue->tx_bd_base;
186 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
187 			txbdp->lstatus = 0;
188 			txbdp->bufPtr = 0;
189 			txbdp++;
190 		}
191 
192 		/* Set the last descriptor in the ring to indicate wrap */
193 		txbdp--;
194 		txbdp->status |= TXBD_WRAP;
195 	}
196 
197 	for (i = 0; i < priv->num_rx_queues; i++) {
198 		rx_queue = priv->rx_queue[i];
199 		rx_queue->cur_rx = rx_queue->rx_bd_base;
200 		rx_queue->skb_currx = 0;
201 		rxbdp = rx_queue->rx_bd_base;
202 
203 		for (j = 0; j < rx_queue->rx_ring_size; j++) {
204 			struct sk_buff *skb = rx_queue->rx_skbuff[j];
205 
206 			if (skb) {
207 				gfar_init_rxbdp(rx_queue, rxbdp,
208 						rxbdp->bufPtr);
209 			} else {
210 				skb = gfar_new_skb(ndev);
211 				if (!skb) {
212 					netdev_err(ndev, "Can't allocate RX buffers\n");
213 					return -ENOMEM;
214 				}
215 				rx_queue->rx_skbuff[j] = skb;
216 
217 				gfar_new_rxbdp(rx_queue, rxbdp, skb);
218 			}
219 
220 			rxbdp++;
221 		}
222 
223 	}
224 
225 	return 0;
226 }
227 
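/* Allocate one DMA-coherent region for all Tx and Rx descriptor rings, carve
 * it up between the queues, allocate the per-queue skb pointer arrays, and
 * initialize the rings via gfar_init_bds().
 */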
228 static int gfar_alloc_skb_resources(struct net_device *ndev)
229 {
230 	void *vaddr;
231 	dma_addr_t addr;
232 	int i, j, k;
233 	struct gfar_private *priv = netdev_priv(ndev);
234 	struct device *dev = &priv->ofdev->dev;
235 	struct gfar_priv_tx_q *tx_queue = NULL;
236 	struct gfar_priv_rx_q *rx_queue = NULL;
237 
238 	priv->total_tx_ring_size = 0;
239 	for (i = 0; i < priv->num_tx_queues; i++)
240 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
241 
242 	priv->total_rx_ring_size = 0;
243 	for (i = 0; i < priv->num_rx_queues; i++)
244 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
245 
246 	/* Allocate memory for the buffer descriptors */
247 	vaddr = dma_alloc_coherent(dev,
248 			sizeof(struct txbd8) * priv->total_tx_ring_size +
249 			sizeof(struct rxbd8) * priv->total_rx_ring_size,
250 			&addr, GFP_KERNEL);
251 	if (!vaddr) {
252 		netif_err(priv, ifup, ndev,
253 			  "Could not allocate buffer descriptors!\n");
254 		return -ENOMEM;
255 	}
256 
257 	for (i = 0; i < priv->num_tx_queues; i++) {
258 		tx_queue = priv->tx_queue[i];
259 		tx_queue->tx_bd_base = vaddr;
260 		tx_queue->tx_bd_dma_base = addr;
261 		tx_queue->dev = ndev;
262 		/* enet DMA only understands physical addresses */
263 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
264 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
265 	}
266 
267 	/* Start the rx descriptor ring where the tx ring leaves off */
268 	for (i = 0; i < priv->num_rx_queues; i++) {
269 		rx_queue = priv->rx_queue[i];
270 		rx_queue->rx_bd_base = vaddr;
271 		rx_queue->rx_bd_dma_base = addr;
272 		rx_queue->dev = ndev;
273 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
274 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
275 	}
276 
277 	/* Setup the skbuff rings */
278 	for (i = 0; i < priv->num_tx_queues; i++) {
279 		tx_queue = priv->tx_queue[i];
280 		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
281 					      tx_queue->tx_ring_size,
282 					      GFP_KERNEL);
283 		if (!tx_queue->tx_skbuff) {
284 			netif_err(priv, ifup, ndev,
285 				  "Could not allocate tx_skbuff\n");
286 			goto cleanup;
287 		}
288 
289 		for (k = 0; k < tx_queue->tx_ring_size; k++)
290 			tx_queue->tx_skbuff[k] = NULL;
291 	}
292 
293 	for (i = 0; i < priv->num_rx_queues; i++) {
294 		rx_queue = priv->rx_queue[i];
295 		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
296 					      rx_queue->rx_ring_size,
297 					      GFP_KERNEL);
298 
299 		if (!rx_queue->rx_skbuff) {
300 			netif_err(priv, ifup, ndev,
301 				  "Could not allocate rx_skbuff\n");
302 			goto cleanup;
303 		}
304 
305 		for (j = 0; j < rx_queue->rx_ring_size; j++)
306 			rx_queue->rx_skbuff[j] = NULL;
307 	}
308 
309 	if (gfar_init_bds(ndev))
310 		goto cleanup;
311 
312 	return 0;
313 
314 cleanup:
315 	free_skb_resources(priv);
316 	return -ENOMEM;
317 }
318 
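/* Program the TBASEn/RBASEn registers with the DMA address of each queue's
 * buffer descriptor ring.
 */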
319 static void gfar_init_tx_rx_base(struct gfar_private *priv)
320 {
321 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
322 	u32 __iomem *baddr;
323 	int i;
324 
325 	baddr = &regs->tbase0;
326 	for (i = 0; i < priv->num_tx_queues; i++) {
327 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
328 		baddr += 2;
329 	}
330 
331 	baddr = &regs->rbase0;
332 	for (i = 0; i < priv->num_rx_queues; i++) {
333 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
334 		baddr += 2;
335 	}
336 }
337 
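/* Set up the MAC-level Rx/Tx controls: ring base registers, coalescing,
 * RCTRL/TCTRL based on the current device features, stashing attributes and
 * the Tx FIFO thresholds.
 */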
338 static void gfar_init_mac(struct net_device *ndev)
339 {
340 	struct gfar_private *priv = netdev_priv(ndev);
341 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
342 	u32 rctrl = 0;
343 	u32 tctrl = 0;
344 	u32 attrs = 0;
345 
346 	/* write the tx/rx base registers */
347 	gfar_init_tx_rx_base(priv);
348 
349 	/* Configure the coalescing support */
350 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
351 
352 	if (priv->rx_filer_enable) {
353 		rctrl |= RCTRL_FILREN;
354 		/* Program the RIR0 reg with the required distribution */
355 		gfar_write(&regs->rir0, DEFAULT_RIR0);
356 	}
357 
358 	if (ndev->features & NETIF_F_RXCSUM)
359 		rctrl |= RCTRL_CHECKSUMMING;
360 
361 	if (priv->extended_hash) {
362 		rctrl |= RCTRL_EXTHASH;
363 
364 		gfar_clear_exact_match(ndev);
365 		rctrl |= RCTRL_EMEN;
366 	}
367 
368 	if (priv->padding) {
369 		rctrl &= ~RCTRL_PAL_MASK;
370 		rctrl |= RCTRL_PADDING(priv->padding);
371 	}
372 
373 	/* Insert receive time stamps into padding alignment bytes */
374 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
375 		rctrl &= ~RCTRL_PAL_MASK;
376 		rctrl |= RCTRL_PADDING(8);
377 		priv->padding = 8;
378 	}
379 
380 	/* Enable HW time stamping if requested from user space */
381 	if (priv->hwts_rx_en)
382 		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
383 
384 	if (ndev->features & NETIF_F_HW_VLAN_RX)
385 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
386 
387 	/* Init rctrl based on our settings */
388 	gfar_write(&regs->rctrl, rctrl);
389 
390 	if (ndev->features & NETIF_F_IP_CSUM)
391 		tctrl |= TCTRL_INIT_CSUM;
392 
393 	if (priv->prio_sched_en)
394 		tctrl |= TCTRL_TXSCHED_PRIO;
395 	else {
396 		tctrl |= TCTRL_TXSCHED_WRRS;
397 		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
398 		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
399 	}
400 
401 	gfar_write(&regs->tctrl, tctrl);
402 
403 	/* Set the extraction length and index */
404 	attrs = ATTRELI_EL(priv->rx_stash_size) |
405 		ATTRELI_EI(priv->rx_stash_index);
406 
407 	gfar_write(&regs->attreli, attrs);
408 
409 	/* Start with defaults, and add stashing or locking
410 	 * depending on the appropriate variables
411 	 */
412 	attrs = ATTR_INIT_SETTINGS;
413 
414 	if (priv->bd_stash_en)
415 		attrs |= ATTR_BDSTASH;
416 
417 	if (priv->rx_stash_size != 0)
418 		attrs |= ATTR_BUFSTASH;
419 
420 	gfar_write(&regs->attr, attrs);
421 
422 	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
423 	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
424 	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
425 }
426 
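/* Sum the per-queue software counters into the netdev stats structure */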
427 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
428 {
429 	struct gfar_private *priv = netdev_priv(dev);
430 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
431 	unsigned long tx_packets = 0, tx_bytes = 0;
432 	int i;
433 
434 	for (i = 0; i < priv->num_rx_queues; i++) {
435 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
436 		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
437 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
438 	}
439 
440 	dev->stats.rx_packets = rx_packets;
441 	dev->stats.rx_bytes   = rx_bytes;
442 	dev->stats.rx_dropped = rx_dropped;
443 
444 	for (i = 0; i < priv->num_tx_queues; i++) {
445 		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
446 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
447 	}
448 
449 	dev->stats.tx_bytes   = tx_bytes;
450 	dev->stats.tx_packets = tx_packets;
451 
452 	return &dev->stats;
453 }
454 
455 static const struct net_device_ops gfar_netdev_ops = {
456 	.ndo_open = gfar_enet_open,
457 	.ndo_start_xmit = gfar_start_xmit,
458 	.ndo_stop = gfar_close,
459 	.ndo_change_mtu = gfar_change_mtu,
460 	.ndo_set_features = gfar_set_features,
461 	.ndo_set_rx_mode = gfar_set_multi,
462 	.ndo_tx_timeout = gfar_timeout,
463 	.ndo_do_ioctl = gfar_ioctl,
464 	.ndo_get_stats = gfar_get_stats,
465 	.ndo_set_mac_address = eth_mac_addr,
466 	.ndo_validate_addr = eth_validate_addr,
467 #ifdef CONFIG_NET_POLL_CONTROLLER
468 	.ndo_poll_controller = gfar_netpoll,
469 #endif
470 };
471 
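/* Helpers for taking or releasing the locks of all Rx/Tx queues at once;
 * callers here disable local interrupts before using them.
 */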
472 void lock_rx_qs(struct gfar_private *priv)
473 {
474 	int i;
475 
476 	for (i = 0; i < priv->num_rx_queues; i++)
477 		spin_lock(&priv->rx_queue[i]->rxlock);
478 }
479 
480 void lock_tx_qs(struct gfar_private *priv)
481 {
482 	int i;
483 
484 	for (i = 0; i < priv->num_tx_queues; i++)
485 		spin_lock(&priv->tx_queue[i]->txlock);
486 }
487 
488 void unlock_rx_qs(struct gfar_private *priv)
489 {
490 	int i;
491 
492 	for (i = 0; i < priv->num_rx_queues; i++)
493 		spin_unlock(&priv->rx_queue[i]->rxlock);
494 }
495 
496 void unlock_tx_qs(struct gfar_private *priv)
497 {
498 	int i;
499 
500 	for (i = 0; i < priv->num_tx_queues; i++)
501 		spin_unlock(&priv->tx_queue[i]->txlock);
502 }
503 
504 static bool gfar_is_vlan_on(struct gfar_private *priv)
505 {
506 	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
507 	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
508 }
509 
510 /* Returns 1 if incoming frames use an FCB */
511 static inline int gfar_uses_fcb(struct gfar_private *priv)
512 {
513 	return gfar_is_vlan_on(priv) ||
514 	       (priv->ndev->features & NETIF_F_RXCSUM) ||
515 	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
516 }
517 
518 static void free_tx_pointers(struct gfar_private *priv)
519 {
520 	int i;
521 
522 	for (i = 0; i < priv->num_tx_queues; i++)
523 		kfree(priv->tx_queue[i]);
524 }
525 
526 static void free_rx_pointers(struct gfar_private *priv)
527 {
528 	int i;
529 
530 	for (i = 0; i < priv->num_rx_queues; i++)
531 		kfree(priv->rx_queue[i]);
532 }
533 
534 static void unmap_group_regs(struct gfar_private *priv)
535 {
536 	int i;
537 
538 	for (i = 0; i < MAXGROUPS; i++)
539 		if (priv->gfargrp[i].regs)
540 			iounmap(priv->gfargrp[i].regs);
541 }
542 
543 static void disable_napi(struct gfar_private *priv)
544 {
545 	int i;
546 
547 	for (i = 0; i < priv->num_grps; i++)
548 		napi_disable(&priv->gfargrp[i].napi);
549 }
550 
551 static void enable_napi(struct gfar_private *priv)
552 {
553 	int i;
554 
555 	for (i = 0; i < priv->num_grps; i++)
556 		napi_enable(&priv->gfargrp[i].napi);
557 }
558 
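/* Map one register group's MMIO region, parse its interrupt lines and Rx/Tx
 * queue bit maps from the device tree node, and record the group in
 * priv->gfargrp[].
 */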
559 static int gfar_parse_group(struct device_node *np,
560 			    struct gfar_private *priv, const char *model)
561 {
562 	u32 *queue_mask;
563 
564 	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
565 	if (!priv->gfargrp[priv->num_grps].regs)
566 		return -ENOMEM;
567 
568 	priv->gfargrp[priv->num_grps].interruptTransmit =
569 			irq_of_parse_and_map(np, 0);
570 
571 	/* If we aren't the FEC, we have multiple interrupts */
572 	if (model && strcasecmp(model, "FEC")) {
573 		priv->gfargrp[priv->num_grps].interruptReceive =
574 			irq_of_parse_and_map(np, 1);
575 		priv->gfargrp[priv->num_grps].interruptError =
576 			irq_of_parse_and_map(np, 2);
577 		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
578 		    priv->gfargrp[priv->num_grps].interruptReceive  == NO_IRQ ||
579 		    priv->gfargrp[priv->num_grps].interruptError    == NO_IRQ)
580 			return -EINVAL;
581 	}
582 
583 	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
584 	priv->gfargrp[priv->num_grps].priv = priv;
585 	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
586 	if (priv->mode == MQ_MG_MODE) {
587 		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
588 		priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
589 			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
590 		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
591 		priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
592 			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
593 	} else {
594 		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
595 		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
596 	}
597 	priv->num_grps++;
598 
599 	return 0;
600 }
601 
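/* Allocate the net_device and parse the device tree node: queue counts,
 * register groups, stashing properties, MAC address, capability flags and
 * PHY handles.
 */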
602 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
603 {
604 	const char *model;
605 	const char *ctype;
606 	const void *mac_addr;
607 	int err = 0, i;
608 	struct net_device *dev = NULL;
609 	struct gfar_private *priv = NULL;
610 	struct device_node *np = ofdev->dev.of_node;
611 	struct device_node *child = NULL;
612 	const u32 *stash;
613 	const u32 *stash_len;
614 	const u32 *stash_idx;
615 	unsigned int num_tx_qs, num_rx_qs;
616 	u32 *tx_queues, *rx_queues;
617 
618 	if (!np || !of_device_is_available(np))
619 		return -ENODEV;
620 
621 	/* parse the num of tx and rx queues */
622 	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
623 	num_tx_qs = tx_queues ? *tx_queues : 1;
624 
625 	if (num_tx_qs > MAX_TX_QS) {
626 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
627 		       num_tx_qs, MAX_TX_QS);
628 		pr_err("Cannot do alloc_etherdev, aborting\n");
629 		return -EINVAL;
630 	}
631 
632 	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
633 	num_rx_qs = rx_queues ? *rx_queues : 1;
634 
635 	if (num_rx_qs > MAX_RX_QS) {
636 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
637 		       num_rx_qs, MAX_RX_QS);
638 		pr_err("Cannot do alloc_etherdev, aborting\n");
639 		return -EINVAL;
640 	}
641 
642 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
643 	dev = *pdev;
644 	if (NULL == dev)
645 		return -ENOMEM;
646 
647 	priv = netdev_priv(dev);
648 	priv->node = ofdev->dev.of_node;
649 	priv->ndev = dev;
650 
651 	priv->num_tx_queues = num_tx_qs;
652 	netif_set_real_num_rx_queues(dev, num_rx_qs);
653 	priv->num_rx_queues = num_rx_qs;
654 	priv->num_grps = 0x0;
655 
656 	/* Init Rx queue filer rule set linked list */
657 	INIT_LIST_HEAD(&priv->rx_list.list);
658 	priv->rx_list.count = 0;
659 	mutex_init(&priv->rx_queue_access);
660 
661 	model = of_get_property(np, "model", NULL);
662 
663 	for (i = 0; i < MAXGROUPS; i++)
664 		priv->gfargrp[i].regs = NULL;
665 
666 	/* Parse and initialize group specific information */
667 	if (of_device_is_compatible(np, "fsl,etsec2")) {
668 		priv->mode = MQ_MG_MODE;
669 		for_each_child_of_node(np, child) {
670 			err = gfar_parse_group(child, priv, model);
671 			if (err)
672 				goto err_grp_init;
673 		}
674 	} else {
675 		priv->mode = SQ_SG_MODE;
676 		err = gfar_parse_group(np, priv, model);
677 		if (err)
678 			goto err_grp_init;
679 	}
680 
681 	for (i = 0; i < priv->num_tx_queues; i++)
682 		priv->tx_queue[i] = NULL;
683 	for (i = 0; i < priv->num_rx_queues; i++)
684 		priv->rx_queue[i] = NULL;
685 
686 	for (i = 0; i < priv->num_tx_queues; i++) {
687 		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
688 					    GFP_KERNEL);
689 		if (!priv->tx_queue[i]) {
690 			err = -ENOMEM;
691 			goto tx_alloc_failed;
692 		}
693 		priv->tx_queue[i]->tx_skbuff = NULL;
694 		priv->tx_queue[i]->qindex = i;
695 		priv->tx_queue[i]->dev = dev;
696 		spin_lock_init(&(priv->tx_queue[i]->txlock));
697 	}
698 
699 	for (i = 0; i < priv->num_rx_queues; i++) {
700 		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
701 					    GFP_KERNEL);
702 		if (!priv->rx_queue[i]) {
703 			err = -ENOMEM;
704 			goto rx_alloc_failed;
705 		}
706 		priv->rx_queue[i]->rx_skbuff = NULL;
707 		priv->rx_queue[i]->qindex = i;
708 		priv->rx_queue[i]->dev = dev;
709 		spin_lock_init(&(priv->rx_queue[i]->rxlock));
710 	}
711 
713 	stash = of_get_property(np, "bd-stash", NULL);
714 
715 	if (stash) {
716 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
717 		priv->bd_stash_en = 1;
718 	}
719 
720 	stash_len = of_get_property(np, "rx-stash-len", NULL);
721 
722 	if (stash_len)
723 		priv->rx_stash_size = *stash_len;
724 
725 	stash_idx = of_get_property(np, "rx-stash-idx", NULL);
726 
727 	if (stash_idx)
728 		priv->rx_stash_index = *stash_idx;
729 
730 	if (stash_len || stash_idx)
731 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
732 
733 	mac_addr = of_get_mac_address(np);
734 
735 	if (mac_addr)
736 		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
737 
738 	if (model && !strcasecmp(model, "TSEC"))
739 		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
740 				     FSL_GIANFAR_DEV_HAS_COALESCE |
741 				     FSL_GIANFAR_DEV_HAS_RMON |
742 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
743 
744 	if (model && !strcasecmp(model, "eTSEC"))
745 		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
746 				     FSL_GIANFAR_DEV_HAS_COALESCE |
747 				     FSL_GIANFAR_DEV_HAS_RMON |
748 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
749 				     FSL_GIANFAR_DEV_HAS_PADDING |
750 				     FSL_GIANFAR_DEV_HAS_CSUM |
751 				     FSL_GIANFAR_DEV_HAS_VLAN |
752 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
753 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
754 				     FSL_GIANFAR_DEV_HAS_TIMER;
755 
756 	ctype = of_get_property(np, "phy-connection-type", NULL);
757 
758 	/* We only care about rgmii-id.  The rest are autodetected */
759 	if (ctype && !strcmp(ctype, "rgmii-id"))
760 		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
761 	else
762 		priv->interface = PHY_INTERFACE_MODE_MII;
763 
764 	if (of_get_property(np, "fsl,magic-packet", NULL))
765 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
766 
767 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
768 
769 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
770 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
771 
772 	return 0;
773 
774 rx_alloc_failed:
775 	free_rx_pointers(priv);
776 tx_alloc_failed:
777 	free_tx_pointers(priv);
778 err_grp_init:
779 	unmap_group_regs(priv);
780 	free_netdev(dev);
781 	return err;
782 }
783 
784 static int gfar_hwtstamp_ioctl(struct net_device *netdev,
785 			       struct ifreq *ifr, int cmd)
786 {
787 	struct hwtstamp_config config;
788 	struct gfar_private *priv = netdev_priv(netdev);
789 
790 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
791 		return -EFAULT;
792 
793 	/* reserved for future extensions */
794 	if (config.flags)
795 		return -EINVAL;
796 
797 	switch (config.tx_type) {
798 	case HWTSTAMP_TX_OFF:
799 		priv->hwts_tx_en = 0;
800 		break;
801 	case HWTSTAMP_TX_ON:
802 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
803 			return -ERANGE;
804 		priv->hwts_tx_en = 1;
805 		break;
806 	default:
807 		return -ERANGE;
808 	}
809 
810 	switch (config.rx_filter) {
811 	case HWTSTAMP_FILTER_NONE:
812 		if (priv->hwts_rx_en) {
813 			stop_gfar(netdev);
814 			priv->hwts_rx_en = 0;
815 			startup_gfar(netdev);
816 		}
817 		break;
818 	default:
819 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
820 			return -ERANGE;
821 		if (!priv->hwts_rx_en) {
822 			stop_gfar(netdev);
823 			priv->hwts_rx_en = 1;
824 			startup_gfar(netdev);
825 		}
826 		config.rx_filter = HWTSTAMP_FILTER_ALL;
827 		break;
828 	}
829 
830 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
831 		-EFAULT : 0;
832 }
833 
834 /* Ioctl MII Interface */
835 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
836 {
837 	struct gfar_private *priv = netdev_priv(dev);
838 
839 	if (!netif_running(dev))
840 		return -EINVAL;
841 
842 	if (cmd == SIOCSHWTSTAMP)
843 		return gfar_hwtstamp_ioctl(dev, rq, cmd);
844 
845 	if (!priv->phydev)
846 		return -ENODEV;
847 
848 	return phy_mii_ioctl(priv->phydev, rq, cmd);
849 }
850 
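/* Mirror the low max_qs bits of bit_map, so that bit 0 of the result
 * corresponds to the most significant of the original bits.
 */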
851 static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
852 {
853 	unsigned int new_bit_map = 0x0;
854 	int mask = 0x1 << (max_qs - 1), i;
855 
856 	for (i = 0; i < max_qs; i++) {
857 		if (bit_map & mask)
858 			new_bit_map = new_bit_map + (1 << i);
859 		mask = mask >> 0x1;
860 	}
861 	return new_bit_map;
862 }
863 
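/* Write a cluster of four filer entries matching the given protocol class
 * and return the lowest filer index used, which becomes the starting point
 * for the next class.
 */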
864 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
865 				   u32 class)
866 {
867 	u32 rqfpr = FPR_FILER_MASK;
868 	u32 rqfcr = 0x0;
869 
870 	rqfar--;
871 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
872 	priv->ftp_rqfpr[rqfar] = rqfpr;
873 	priv->ftp_rqfcr[rqfar] = rqfcr;
874 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
875 
876 	rqfar--;
877 	rqfcr = RQFCR_CMP_NOMATCH;
878 	priv->ftp_rqfpr[rqfar] = rqfpr;
879 	priv->ftp_rqfcr[rqfar] = rqfcr;
880 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
881 
882 	rqfar--;
883 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
884 	rqfpr = class;
885 	priv->ftp_rqfcr[rqfar] = rqfcr;
886 	priv->ftp_rqfpr[rqfar] = rqfpr;
887 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
888 
889 	rqfar--;
890 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
891 	rqfpr = class;
892 	priv->ftp_rqfcr[rqfar] = rqfcr;
893 	priv->ftp_rqfpr[rqfar] = rqfpr;
894 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
895 
896 	return rqfar;
897 }
898 
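/* Install the default Rx filer rules: a catch-all match at the top of the
 * table, one cluster per IPv4/IPv6 TCP/UDP class, and no-match entries for
 * the remaining slots.
 */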
899 static void gfar_init_filer_table(struct gfar_private *priv)
900 {
901 	int i = 0x0;
902 	u32 rqfar = MAX_FILER_IDX;
903 	u32 rqfcr = 0x0;
904 	u32 rqfpr = FPR_FILER_MASK;
905 
906 	/* Default rule */
907 	rqfcr = RQFCR_CMP_MATCH;
908 	priv->ftp_rqfcr[rqfar] = rqfcr;
909 	priv->ftp_rqfpr[rqfar] = rqfpr;
910 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
911 
912 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
913 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
914 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
915 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
916 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
917 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
918 
919 	/* cur_filer_idx indicates the first non-masked rule */
920 	priv->cur_filer_idx = rqfar;
921 
922 	/* Rest are masked rules */
923 	rqfcr = RQFCR_CMP_NOMATCH;
924 	for (i = 0; i < rqfar; i++) {
925 		priv->ftp_rqfcr[i] = rqfcr;
926 		priv->ftp_rqfpr[i] = rqfpr;
927 		gfar_write_filer(priv, i, rqfcr, rqfpr);
928 	}
929 }
930 
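/* Identify the host SoC from the PVR/SVR registers and record which eTSEC
 * errata workarounds need to be applied.
 */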
931 static void gfar_detect_errata(struct gfar_private *priv)
932 {
933 	struct device *dev = &priv->ofdev->dev;
934 	unsigned int pvr = mfspr(SPRN_PVR);
935 	unsigned int svr = mfspr(SPRN_SVR);
936 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
937 	unsigned int rev = svr & 0xffff;
938 
939 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
940 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
941 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
942 		priv->errata |= GFAR_ERRATA_74;
943 
944 	/* MPC8313 and MPC837x all rev */
945 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
946 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
947 		priv->errata |= GFAR_ERRATA_76;
948 
949 	/* MPC8313 and MPC837x all rev */
950 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
951 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
952 		priv->errata |= GFAR_ERRATA_A002;
953 
954 	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
955 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
956 	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
957 		priv->errata |= GFAR_ERRATA_12;
958 
959 	if (priv->errata)
960 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
961 			 priv->errata);
962 }
963 
964 /* Set up the ethernet device structure, private data,
965  * and anything else we need before we start
966  */
967 static int gfar_probe(struct platform_device *ofdev)
968 {
969 	u32 tempval;
970 	struct net_device *dev = NULL;
971 	struct gfar_private *priv = NULL;
972 	struct gfar __iomem *regs = NULL;
973 	int err = 0, i, grp_idx = 0;
974 	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
975 	u32 isrg = 0;
976 	u32 __iomem *baddr;
977 
978 	err = gfar_of_init(ofdev, &dev);
979 
980 	if (err)
981 		return err;
982 
983 	priv = netdev_priv(dev);
984 	priv->ndev = dev;
985 	priv->ofdev = ofdev;
986 	priv->node = ofdev->dev.of_node;
987 	SET_NETDEV_DEV(dev, &ofdev->dev);
988 
989 	spin_lock_init(&priv->bflock);
990 	INIT_WORK(&priv->reset_task, gfar_reset_task);
991 
992 	dev_set_drvdata(&ofdev->dev, priv);
993 	regs = priv->gfargrp[0].regs;
994 
995 	gfar_detect_errata(priv);
996 
997 	/* Stop the DMA engine now, in case it was running before
998 	 * (The firmware could have used it, and left it running).
999 	 */
1000 	gfar_halt(dev);
1001 
1002 	/* Reset MAC layer */
1003 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1004 
1005 	/* We need to delay at least 3 TX clocks */
1006 	udelay(2);
1007 
1008 	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1009 	gfar_write(&regs->maccfg1, tempval);
1010 
1011 	/* Initialize MACCFG2. */
1012 	tempval = MACCFG2_INIT_SETTINGS;
1013 	if (gfar_has_errata(priv, GFAR_ERRATA_74))
1014 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1015 	gfar_write(&regs->maccfg2, tempval);
1016 
1017 	/* Initialize ECNTRL */
1018 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1019 
1020 	/* Set the dev->base_addr to the gfar reg region */
1021 	dev->base_addr = (unsigned long) regs;
1022 
1023 	SET_NETDEV_DEV(dev, &ofdev->dev);
1024 
1025 	/* Fill in the dev structure */
1026 	dev->watchdog_timeo = TX_TIMEOUT;
1027 	dev->mtu = 1500;
1028 	dev->netdev_ops = &gfar_netdev_ops;
1029 	dev->ethtool_ops = &gfar_ethtool_ops;
1030 
1031 	/* Register for NAPI; we are registering NAPI for each group */
1032 	for (i = 0; i < priv->num_grps; i++)
1033 		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
1034 			       GFAR_DEV_WEIGHT);
1035 
1036 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1037 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1038 				   NETIF_F_RXCSUM;
1039 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1040 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1041 	}
1042 
1043 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1044 		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1045 		dev->features |= NETIF_F_HW_VLAN_RX;
1046 	}
1047 
1048 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1049 		priv->extended_hash = 1;
1050 		priv->hash_width = 9;
1051 
1052 		priv->hash_regs[0] = &regs->igaddr0;
1053 		priv->hash_regs[1] = &regs->igaddr1;
1054 		priv->hash_regs[2] = &regs->igaddr2;
1055 		priv->hash_regs[3] = &regs->igaddr3;
1056 		priv->hash_regs[4] = &regs->igaddr4;
1057 		priv->hash_regs[5] = &regs->igaddr5;
1058 		priv->hash_regs[6] = &regs->igaddr6;
1059 		priv->hash_regs[7] = &regs->igaddr7;
1060 		priv->hash_regs[8] = &regs->gaddr0;
1061 		priv->hash_regs[9] = &regs->gaddr1;
1062 		priv->hash_regs[10] = &regs->gaddr2;
1063 		priv->hash_regs[11] = &regs->gaddr3;
1064 		priv->hash_regs[12] = &regs->gaddr4;
1065 		priv->hash_regs[13] = &regs->gaddr5;
1066 		priv->hash_regs[14] = &regs->gaddr6;
1067 		priv->hash_regs[15] = &regs->gaddr7;
1068 
1069 	} else {
1070 		priv->extended_hash = 0;
1071 		priv->hash_width = 8;
1072 
1073 		priv->hash_regs[0] = &regs->gaddr0;
1074 		priv->hash_regs[1] = &regs->gaddr1;
1075 		priv->hash_regs[2] = &regs->gaddr2;
1076 		priv->hash_regs[3] = &regs->gaddr3;
1077 		priv->hash_regs[4] = &regs->gaddr4;
1078 		priv->hash_regs[5] = &regs->gaddr5;
1079 		priv->hash_regs[6] = &regs->gaddr6;
1080 		priv->hash_regs[7] = &regs->gaddr7;
1081 	}
1082 
1083 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
1084 		priv->padding = DEFAULT_PADDING;
1085 	else
1086 		priv->padding = 0;
1087 
1088 	if (dev->features & NETIF_F_IP_CSUM ||
1089 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1090 		dev->needed_headroom = GMAC_FCB_LEN;
1091 
1092 	/* Program the isrg regs only if number of grps > 1 */
1093 	if (priv->num_grps > 1) {
1094 		baddr = &regs->isrg0;
1095 		for (i = 0; i < priv->num_grps; i++) {
1096 			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
1097 			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
1098 			gfar_write(baddr, isrg);
1099 			baddr++;
1100 			isrg = 0x0;
1101 		}
1102 	}
1103 
1104 	/* Need to reverse the bit maps, as bit_map's MSB is q0
1105 	 * but for_each_set_bit() parses from right to left, which
1106 	 * basically reverses the queue numbers
1107 	 */
1108 	for (i = 0; i < priv->num_grps; i++) {
1109 		priv->gfargrp[i].tx_bit_map =
1110 			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1111 		priv->gfargrp[i].rx_bit_map =
1112 			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1113 	}
1114 
1115 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1116 	 * also assign queues to groups
1117 	 */
1118 	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1119 		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1120 
1121 		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1122 				 priv->num_rx_queues) {
1123 			priv->gfargrp[grp_idx].num_rx_queues++;
1124 			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1125 			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1126 			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1127 		}
1128 		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1129 
1130 		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1131 				 priv->num_tx_queues) {
1132 			priv->gfargrp[grp_idx].num_tx_queues++;
1133 			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1134 			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1135 			tqueue = tqueue | (TQUEUE_EN0 >> i);
1136 		}
1137 		priv->gfargrp[grp_idx].rstat = rstat;
1138 		priv->gfargrp[grp_idx].tstat = tstat;
1139 		rstat = tstat = 0;
1140 	}
1141 
1142 	gfar_write(&regs->rqueue, rqueue);
1143 	gfar_write(&regs->tqueue, tqueue);
1144 
1145 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1146 
1147 	/* Initializing some of the rx/tx queue level parameters */
1148 	for (i = 0; i < priv->num_tx_queues; i++) {
1149 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1150 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1151 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1152 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
1153 	}
1154 
1155 	for (i = 0; i < priv->num_rx_queues; i++) {
1156 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1157 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1158 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1159 	}
1160 
1161 	/* always enable rx filer */
1162 	priv->rx_filer_enable = 1;
1163 	/* Enable most messages by default */
1164 	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
1165 	/* use priority h/w tx queue scheduling for single queue devices */
1166 	if (priv->num_tx_queues == 1)
1167 		priv->prio_sched_en = 1;
1168 
1169 	/* Carrier starts down, phylib will bring it up */
1170 	netif_carrier_off(dev);
1171 
1172 	err = register_netdev(dev);
1173 
1174 	if (err) {
1175 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
1176 		goto register_fail;
1177 	}
1178 
1179 	device_init_wakeup(&dev->dev,
1180 			   priv->device_flags &
1181 			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1182 
1183 	/* fill out IRQ number and name fields */
1184 	for (i = 0; i < priv->num_grps; i++) {
1185 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1186 			sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
1187 				dev->name, "_g", '0' + i, "_tx");
1188 			sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
1189 				dev->name, "_g", '0' + i, "_rx");
1190 			sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
1191 				dev->name, "_g", '0' + i, "_er");
1192 		} else
1193 			strcpy(priv->gfargrp[i].int_name_tx, dev->name);
1194 	}
1195 
1196 	/* Initialize the filer table */
1197 	gfar_init_filer_table(priv);
1198 
1199 	/* Create all the sysfs files */
1200 	gfar_init_sysfs(dev);
1201 
1202 	/* Print out the device info */
1203 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1204 
1205 	/* Even more device info helps when determining which kernel
1206 	 * provided which set of benchmarks.
1207 	 */
1208 	netdev_info(dev, "Running with NAPI enabled\n");
1209 	for (i = 0; i < priv->num_rx_queues; i++)
1210 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1211 			    i, priv->rx_queue[i]->rx_ring_size);
1212 	for (i = 0; i < priv->num_tx_queues; i++)
1213 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1214 			    i, priv->tx_queue[i]->tx_ring_size);
1215 
1216 	return 0;
1217 
1218 register_fail:
1219 	unmap_group_regs(priv);
1220 	free_tx_pointers(priv);
1221 	free_rx_pointers(priv);
1222 	if (priv->phy_node)
1223 		of_node_put(priv->phy_node);
1224 	if (priv->tbi_node)
1225 		of_node_put(priv->tbi_node);
1226 	free_netdev(dev);
1227 	return err;
1228 }
1229 
1230 static int gfar_remove(struct platform_device *ofdev)
1231 {
1232 	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
1233 
1234 	if (priv->phy_node)
1235 		of_node_put(priv->phy_node);
1236 	if (priv->tbi_node)
1237 		of_node_put(priv->tbi_node);
1238 
1239 	dev_set_drvdata(&ofdev->dev, NULL);
1240 
1241 	unregister_netdev(priv->ndev);
1242 	unmap_group_regs(priv);
1243 	free_netdev(priv->ndev);
1244 
1245 	return 0;
1246 }
1247 
1248 #ifdef CONFIG_PM
1249 
1250 static int gfar_suspend(struct device *dev)
1251 {
1252 	struct gfar_private *priv = dev_get_drvdata(dev);
1253 	struct net_device *ndev = priv->ndev;
1254 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1255 	unsigned long flags;
1256 	u32 tempval;
1257 
1258 	int magic_packet = priv->wol_en &&
1259 			   (priv->device_flags &
1260 			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1261 
1262 	netif_device_detach(ndev);
1263 
1264 	if (netif_running(ndev)) {
1265 
1266 		local_irq_save(flags);
1267 		lock_tx_qs(priv);
1268 		lock_rx_qs(priv);
1269 
1270 		gfar_halt_nodisable(ndev);
1271 
1272 		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
1273 		tempval = gfar_read(&regs->maccfg1);
1274 
1275 		tempval &= ~MACCFG1_TX_EN;
1276 
1277 		if (!magic_packet)
1278 			tempval &= ~MACCFG1_RX_EN;
1279 
1280 		gfar_write(&regs->maccfg1, tempval);
1281 
1282 		unlock_rx_qs(priv);
1283 		unlock_tx_qs(priv);
1284 		local_irq_restore(flags);
1285 
1286 		disable_napi(priv);
1287 
1288 		if (magic_packet) {
1289 			/* Enable interrupt on Magic Packet */
1290 			gfar_write(&regs->imask, IMASK_MAG);
1291 
1292 			/* Enable Magic Packet mode */
1293 			tempval = gfar_read(&regs->maccfg2);
1294 			tempval |= MACCFG2_MPEN;
1295 			gfar_write(&regs->maccfg2, tempval);
1296 		} else {
1297 			phy_stop(priv->phydev);
1298 		}
1299 	}
1300 
1301 	return 0;
1302 }
1303 
1304 static int gfar_resume(struct device *dev)
1305 {
1306 	struct gfar_private *priv = dev_get_drvdata(dev);
1307 	struct net_device *ndev = priv->ndev;
1308 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1309 	unsigned long flags;
1310 	u32 tempval;
1311 	int magic_packet = priv->wol_en &&
1312 			   (priv->device_flags &
1313 			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1314 
1315 	if (!netif_running(ndev)) {
1316 		netif_device_attach(ndev);
1317 		return 0;
1318 	}
1319 
1320 	if (!magic_packet && priv->phydev)
1321 		phy_start(priv->phydev);
1322 
1323 	/* Disable Magic Packet mode, in case something
1324 	 * else woke us up.
1325 	 */
1326 	local_irq_save(flags);
1327 	lock_tx_qs(priv);
1328 	lock_rx_qs(priv);
1329 
1330 	tempval = gfar_read(&regs->maccfg2);
1331 	tempval &= ~MACCFG2_MPEN;
1332 	gfar_write(&regs->maccfg2, tempval);
1333 
1334 	gfar_start(ndev);
1335 
1336 	unlock_rx_qs(priv);
1337 	unlock_tx_qs(priv);
1338 	local_irq_restore(flags);
1339 
1340 	netif_device_attach(ndev);
1341 
1342 	enable_napi(priv);
1343 
1344 	return 0;
1345 }
1346 
1347 static int gfar_restore(struct device *dev)
1348 {
1349 	struct gfar_private *priv = dev_get_drvdata(dev);
1350 	struct net_device *ndev = priv->ndev;
1351 
1352 	if (!netif_running(ndev)) {
1353 		netif_device_attach(ndev);
1354 
1355 		return 0;
1356 	}
1357 
1358 	if (gfar_init_bds(ndev)) {
1359 		free_skb_resources(priv);
1360 		return -ENOMEM;
1361 	}
1362 
1363 	init_registers(ndev);
1364 	gfar_set_mac_address(ndev);
1365 	gfar_init_mac(ndev);
1366 	gfar_start(ndev);
1367 
1368 	priv->oldlink = 0;
1369 	priv->oldspeed = 0;
1370 	priv->oldduplex = -1;
1371 
1372 	if (priv->phydev)
1373 		phy_start(priv->phydev);
1374 
1375 	netif_device_attach(ndev);
1376 	enable_napi(priv);
1377 
1378 	return 0;
1379 }
1380 
1381 static struct dev_pm_ops gfar_pm_ops = {
1382 	.suspend = gfar_suspend,
1383 	.resume = gfar_resume,
1384 	.freeze = gfar_suspend,
1385 	.thaw = gfar_resume,
1386 	.restore = gfar_restore,
1387 };
1388 
1389 #define GFAR_PM_OPS (&gfar_pm_ops)
1390 
1391 #else
1392 
1393 #define GFAR_PM_OPS NULL
1394 
1395 #endif
1396 
1397 /* Reads the controller's registers to determine what interface
1398  * connects it to the PHY.
1399  */
1400 static phy_interface_t gfar_get_interface(struct net_device *dev)
1401 {
1402 	struct gfar_private *priv = netdev_priv(dev);
1403 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1404 	u32 ecntrl;
1405 
1406 	ecntrl = gfar_read(&regs->ecntrl);
1407 
1408 	if (ecntrl & ECNTRL_SGMII_MODE)
1409 		return PHY_INTERFACE_MODE_SGMII;
1410 
1411 	if (ecntrl & ECNTRL_TBI_MODE) {
1412 		if (ecntrl & ECNTRL_REDUCED_MODE)
1413 			return PHY_INTERFACE_MODE_RTBI;
1414 		else
1415 			return PHY_INTERFACE_MODE_TBI;
1416 	}
1417 
1418 	if (ecntrl & ECNTRL_REDUCED_MODE) {
1419 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1420 			return PHY_INTERFACE_MODE_RMII;
1421 		} else {
1423 			phy_interface_t interface = priv->interface;
1424 
1425 			/* This isn't autodetected right now, so it must
1426 			 * be set by the device tree or platform code.
1427 			 */
1428 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1429 				return PHY_INTERFACE_MODE_RGMII_ID;
1430 
1431 			return PHY_INTERFACE_MODE_RGMII;
1432 		}
1433 	}
1434 
1435 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1436 		return PHY_INTERFACE_MODE_GMII;
1437 
1438 	return PHY_INTERFACE_MODE_MII;
1439 }
1440 
1441 
1442 /* Initializes driver's PHY state, and attaches to the PHY.
1443  * Returns 0 on success.
1444  */
1445 static int init_phy(struct net_device *dev)
1446 {
1447 	struct gfar_private *priv = netdev_priv(dev);
1448 	uint gigabit_support =
1449 		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1450 		SUPPORTED_1000baseT_Full : 0;
1451 	phy_interface_t interface;
1452 
1453 	priv->oldlink = 0;
1454 	priv->oldspeed = 0;
1455 	priv->oldduplex = -1;
1456 
1457 	interface = gfar_get_interface(dev);
1458 
1459 	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1460 				      interface);
1461 	if (!priv->phydev)
1462 		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1463 							 interface);
1464 	if (!priv->phydev) {
1465 		dev_err(&dev->dev, "could not attach to PHY\n");
1466 		return -ENODEV;
1467 	}
1468 
1469 	if (interface == PHY_INTERFACE_MODE_SGMII)
1470 		gfar_configure_serdes(dev);
1471 
1472 	/* Remove any features not supported by the controller */
1473 	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1474 	priv->phydev->advertising = priv->phydev->supported;
1475 
1476 	return 0;
1477 }
1478 
1479 /* Initialize TBI PHY interface for communicating with the
1480  * SERDES lynx PHY on the chip.  We communicate with this PHY
1481  * through the MDIO bus on each controller, treating it as a
1482  * "normal" PHY at the address found in the TBIPA register.  We assume
1483  * that the TBIPA register is valid.  Either the MDIO bus code will set
1484  * it to a value that doesn't conflict with other PHYs on the bus, or the
1485  * value doesn't matter, as there are no other PHYs on the bus.
1486  */
1487 static void gfar_configure_serdes(struct net_device *dev)
1488 {
1489 	struct gfar_private *priv = netdev_priv(dev);
1490 	struct phy_device *tbiphy;
1491 
1492 	if (!priv->tbi_node) {
1493 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
1494 				    "device tree specify a tbi-handle\n");
1495 		return;
1496 	}
1497 
1498 	tbiphy = of_phy_find_device(priv->tbi_node);
1499 	if (!tbiphy) {
1500 		dev_err(&dev->dev, "error: Could not get TBI device\n");
1501 		return;
1502 	}
1503 
1504 	/* If the link is already up, we must already be ok, and don't need to
1505 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1506 	 * everything for us?  Resetting it takes the link down and requires
1507 	 * several seconds for it to come back.
1508 	 */
1509 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1510 		return;
1511 
1512 	/* Single clk mode, mii mode off (for serdes communication) */
1513 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1514 
1515 	phy_write(tbiphy, MII_ADVERTISE,
1516 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1517 		  ADVERTISE_1000XPSE_ASYM);
1518 
1519 	phy_write(tbiphy, MII_BMCR,
1520 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1521 		  BMCR_SPEED1000);
1522 }
1523 
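/* Clear and mask all interrupts, zero the hash filter and RMON MIB counters,
 * and program the maximum receive buffer and minimum frame length registers.
 */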
1524 static void init_registers(struct net_device *dev)
1525 {
1526 	struct gfar_private *priv = netdev_priv(dev);
1527 	struct gfar __iomem *regs = NULL;
1528 	int i;
1529 
1530 	for (i = 0; i < priv->num_grps; i++) {
1531 		regs = priv->gfargrp[i].regs;
1532 		/* Clear IEVENT */
1533 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1534 
1535 		/* Initialize IMASK */
1536 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1537 	}
1538 
1539 	regs = priv->gfargrp[0].regs;
1540 	/* Init hash registers to zero */
1541 	gfar_write(&regs->igaddr0, 0);
1542 	gfar_write(&regs->igaddr1, 0);
1543 	gfar_write(&regs->igaddr2, 0);
1544 	gfar_write(&regs->igaddr3, 0);
1545 	gfar_write(&regs->igaddr4, 0);
1546 	gfar_write(&regs->igaddr5, 0);
1547 	gfar_write(&regs->igaddr6, 0);
1548 	gfar_write(&regs->igaddr7, 0);
1549 
1550 	gfar_write(&regs->gaddr0, 0);
1551 	gfar_write(&regs->gaddr1, 0);
1552 	gfar_write(&regs->gaddr2, 0);
1553 	gfar_write(&regs->gaddr3, 0);
1554 	gfar_write(&regs->gaddr4, 0);
1555 	gfar_write(&regs->gaddr5, 0);
1556 	gfar_write(&regs->gaddr6, 0);
1557 	gfar_write(&regs->gaddr7, 0);
1558 
1559 	/* Zero out the rmon mib registers if it has them */
1560 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1561 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1562 
1563 		/* Mask off the CAM interrupts */
1564 		gfar_write(&regs->rmon.cam1, 0xffffffff);
1565 		gfar_write(&regs->rmon.cam2, 0xffffffff);
1566 	}
1567 
1568 	/* Initialize the max receive buffer length */
1569 	gfar_write(&regs->mrblr, priv->rx_buffer_size);
1570 
1571 	/* Initialize the Minimum Frame Length Register */
1572 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1573 }
1574 
1575 static int __gfar_is_rx_idle(struct gfar_private *priv)
1576 {
1577 	u32 res;
1578 
1579 	/* Normally the TSEC should not hang on GRS commands, so we should
1580 	 * actually wait for the IEVENT_GRSC flag.
1581 	 */
1582 	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1583 		return 0;
1584 
1585 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1586 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1587 	 * and the Rx can be safely reset.
1588 	 */
1589 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1590 	res &= 0x7f807f80;
1591 	if ((res & 0xffff) == (res >> 16))
1592 		return 1;
1593 
1594 	return 0;
1595 }
1596 
1597 /* Halt the receive and transmit queues */
1598 static void gfar_halt_nodisable(struct net_device *dev)
1599 {
1600 	struct gfar_private *priv = netdev_priv(dev);
1601 	struct gfar __iomem *regs = NULL;
1602 	u32 tempval;
1603 	int i;
1604 
1605 	for (i = 0; i < priv->num_grps; i++) {
1606 		regs = priv->gfargrp[i].regs;
1607 		/* Mask all interrupts */
1608 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1609 
1610 		/* Clear all interrupts */
1611 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1612 	}
1613 
1614 	regs = priv->gfargrp[0].regs;
1615 	/* Stop the DMA, and wait for it to stop */
1616 	tempval = gfar_read(&regs->dmactrl);
1617 	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1618 	    (DMACTRL_GRS | DMACTRL_GTS)) {
1619 		int ret;
1620 
1621 		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1622 		gfar_write(&regs->dmactrl, tempval);
1623 
1624 		do {
1625 			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1626 				 (IEVENT_GRSC | IEVENT_GTSC)) ==
1627 				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1628 			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1629 				ret = __gfar_is_rx_idle(priv);
1630 		} while (!ret);
1631 	}
1632 }
1633 
1634 /* Halt the receive and transmit queues */
1635 void gfar_halt(struct net_device *dev)
1636 {
1637 	struct gfar_private *priv = netdev_priv(dev);
1638 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1639 	u32 tempval;
1640 
1641 	gfar_halt_nodisable(dev);
1642 
1643 	/* Disable Rx and Tx */
1644 	tempval = gfar_read(&regs->maccfg1);
1645 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1646 	gfar_write(&regs->maccfg1, tempval);
1647 }
1648 
1649 static void free_grp_irqs(struct gfar_priv_grp *grp)
1650 {
1651 	free_irq(grp->interruptError, grp);
1652 	free_irq(grp->interruptTransmit, grp);
1653 	free_irq(grp->interruptReceive, grp);
1654 }
1655 
1656 void stop_gfar(struct net_device *dev)
1657 {
1658 	struct gfar_private *priv = netdev_priv(dev);
1659 	unsigned long flags;
1660 	int i;
1661 
1662 	phy_stop(priv->phydev);
1663 
1665 	/* Lock it down */
1666 	local_irq_save(flags);
1667 	lock_tx_qs(priv);
1668 	lock_rx_qs(priv);
1669 
1670 	gfar_halt(dev);
1671 
1672 	unlock_rx_qs(priv);
1673 	unlock_tx_qs(priv);
1674 	local_irq_restore(flags);
1675 
1676 	/* Free the IRQs */
1677 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1678 		for (i = 0; i < priv->num_grps; i++)
1679 			free_grp_irqs(&priv->gfargrp[i]);
1680 	} else {
1681 		for (i = 0; i < priv->num_grps; i++)
1682 			free_irq(priv->gfargrp[i].interruptTransmit,
1683 				 &priv->gfargrp[i]);
1684 	}
1685 
1686 	free_skb_resources(priv);
1687 }
1688 
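/* Unmap and free any skbs still attached to a Tx queue's descriptor ring,
 * then free the queue's skb pointer array.
 */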
1689 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1690 {
1691 	struct txbd8 *txbdp;
1692 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1693 	int i, j;
1694 
1695 	txbdp = tx_queue->tx_bd_base;
1696 
1697 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1698 		if (!tx_queue->tx_skbuff[i])
1699 			continue;
1700 
1701 		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1702 				 txbdp->length, DMA_TO_DEVICE);
1703 		txbdp->lstatus = 0;
1704 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1705 		     j++) {
1706 			txbdp++;
1707 			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1708 				       txbdp->length, DMA_TO_DEVICE);
1709 		}
1710 		txbdp++;
1711 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1712 		tx_queue->tx_skbuff[i] = NULL;
1713 	}
1714 	kfree(tx_queue->tx_skbuff);
1715 	tx_queue->tx_skbuff = NULL;
1716 }
1717 
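/* Unmap and free any skbs still attached to an Rx queue's descriptor ring,
 * then free the queue's skb pointer array.
 */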
1718 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1719 {
1720 	struct rxbd8 *rxbdp;
1721 	struct gfar_private *priv = netdev_priv(rx_queue->dev);
1722 	int i;
1723 
1724 	rxbdp = rx_queue->rx_bd_base;
1725 
1726 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
1727 		if (rx_queue->rx_skbuff[i]) {
1728 			dma_unmap_single(&priv->ofdev->dev,
1729 					 rxbdp->bufPtr, priv->rx_buffer_size,
1730 					 DMA_FROM_DEVICE);
1731 			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1732 			rx_queue->rx_skbuff[i] = NULL;
1733 		}
1734 		rxbdp->lstatus = 0;
1735 		rxbdp->bufPtr = 0;
1736 		rxbdp++;
1737 	}
1738 	kfree(rx_queue->rx_skbuff);
1739 	rx_queue->rx_skbuff = NULL;
1740 }
1741 
1742 /* If there are any tx skbs or rx skbs still around, free them.
1743  * Then free tx_skbuff and rx_skbuff
1744  */
1745 static void free_skb_resources(struct gfar_private *priv)
1746 {
1747 	struct gfar_priv_tx_q *tx_queue = NULL;
1748 	struct gfar_priv_rx_q *rx_queue = NULL;
1749 	int i;
1750 
1751 	/* Go through all the buffer descriptors and free their data buffers */
1752 	for (i = 0; i < priv->num_tx_queues; i++) {
1753 		struct netdev_queue *txq;
1754 
1755 		tx_queue = priv->tx_queue[i];
1756 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1757 		if (tx_queue->tx_skbuff)
1758 			free_skb_tx_queue(tx_queue);
1759 		netdev_tx_reset_queue(txq);
1760 	}
1761 
1762 	for (i = 0; i < priv->num_rx_queues; i++) {
1763 		rx_queue = priv->rx_queue[i];
1764 		if (rx_queue->rx_skbuff)
1765 			free_skb_rx_queue(rx_queue);
1766 	}
1767 
1768 	dma_free_coherent(&priv->ofdev->dev,
1769 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1770 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1771 			  priv->tx_queue[0]->tx_bd_base,
1772 			  priv->tx_queue[0]->tx_bd_dma_base);
1773 }
1774 
1775 void gfar_start(struct net_device *dev)
1776 {
1777 	struct gfar_private *priv = netdev_priv(dev);
1778 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1779 	u32 tempval;
1780 	int i = 0;
1781 
1782 	/* Enable Rx and Tx in MACCFG1 */
1783 	tempval = gfar_read(&regs->maccfg1);
1784 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1785 	gfar_write(&regs->maccfg1, tempval);
1786 
1787 	/* Initialize DMACTRL to have WWR and WOP */
1788 	tempval = gfar_read(&regs->dmactrl);
1789 	tempval |= DMACTRL_INIT_SETTINGS;
1790 	gfar_write(&regs->dmactrl, tempval);
1791 
1792 	/* Make sure we aren't stopped */
1793 	tempval = gfar_read(&regs->dmactrl);
1794 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1795 	gfar_write(&regs->dmactrl, tempval);
1796 
1797 	for (i = 0; i < priv->num_grps; i++) {
1798 		regs = priv->gfargrp[i].regs;
1799 		/* Clear THLT/RHLT, so that the DMA starts polling now */
1800 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1801 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1802 		/* Unmask the interrupts we look for */
1803 		gfar_write(&regs->imask, IMASK_DEFAULT);
1804 	}
1805 
1806 	dev->trans_start = jiffies; /* prevent tx timeout */
1807 }
1808 
1809 void gfar_configure_coalescing(struct gfar_private *priv,
1810 			       unsigned long tx_mask, unsigned long rx_mask)
1811 {
1812 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1813 	u32 __iomem *baddr;
1814 	int i = 0;
1815 
1816 	/* Backward compatible case -- even if we enable
1817 	 * multiple queues, there's only a single register to program
1818 	 */
1819 	gfar_write(&regs->txic, 0);
1820 	if (likely(priv->tx_queue[0]->txcoalescing))
1821 		gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1822 
1823 	gfar_write(&regs->rxic, 0);
1824 	if (unlikely(priv->rx_queue[0]->rxcoalescing))
1825 		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1826 
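	/* In multi-group mode each queue has its own coalescing register */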
1827 	if (priv->mode == MQ_MG_MODE) {
1828 		baddr = &regs->txic0;
1829 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1830 			gfar_write(baddr + i, 0);
1831 			if (likely(priv->tx_queue[i]->txcoalescing))
1832 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
1833 		}
1834 
1835 		baddr = &regs->rxic0;
1836 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1837 			gfar_write(baddr + i, 0);
1838 			if (likely(priv->rx_queue[i]->rxcoalescing))
1839 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1840 		}
1841 	}
1842 }
1843 
1844 static int register_grp_irqs(struct gfar_priv_grp *grp)
1845 {
1846 	struct gfar_private *priv = grp->priv;
1847 	struct net_device *dev = priv->ndev;
1848 	int err;
1849 
1850 	/* If the device has multiple interrupts, register for
1851 	 * them.  Otherwise, only register for the one interrupt.
1852 	 */
1853 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1854 		/* Install our interrupt handlers for Error,
1855 		 * Transmit, and Receive
1856 		 */
1857 		if ((err = request_irq(grp->interruptError, gfar_error,
1858 				       0, grp->int_name_er, grp)) < 0) {
1859 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1860 				  grp->interruptError);
1861 
1862 			goto err_irq_fail;
1863 		}
1864 
1865 		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1866 				       0, grp->int_name_tx, grp)) < 0) {
1867 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1868 				  grp->interruptTransmit);
1869 			goto tx_irq_fail;
1870 		}
1871 
1872 		if ((err = request_irq(grp->interruptReceive, gfar_receive,
1873 				       0, grp->int_name_rx, grp)) < 0) {
1874 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1875 				  grp->interruptReceive);
1876 			goto rx_irq_fail;
1877 		}
1878 	} else {
1879 		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
1880 				       0, grp->int_name_tx, grp)) < 0) {
1881 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1882 				  grp->interruptTransmit);
1883 			goto err_irq_fail;
1884 		}
1885 	}
1886 
1887 	return 0;
1888 
1889 rx_irq_fail:
1890 	free_irq(grp->interruptTransmit, grp);
1891 tx_irq_fail:
1892 	free_irq(grp->interruptError, grp);
1893 err_irq_fail:
1894 	return err;
1896 }
1897 
1898 /* Bring the controller up and running */
1899 int startup_gfar(struct net_device *ndev)
1900 {
1901 	struct gfar_private *priv = netdev_priv(ndev);
1902 	struct gfar __iomem *regs = NULL;
1903 	int err, i, j;
1904 
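	/* Mask all interrupt sources while the rings are (re)initialized */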
1905 	for (i = 0; i < priv->num_grps; i++) {
1906 		regs = priv->gfargrp[i].regs;
1907 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1908 	}
1909 
1910 	regs = priv->gfargrp[0].regs;
1911 	err = gfar_alloc_skb_resources(ndev);
1912 	if (err)
1913 		return err;
1914 
1915 	gfar_init_mac(ndev);
1916 
1917 	for (i = 0; i < priv->num_grps; i++) {
1918 		err = register_grp_irqs(&priv->gfargrp[i]);
1919 		if (err) {
1920 			for (j = 0; j < i; j++)
1921 				free_grp_irqs(&priv->gfargrp[j]);
1922 			goto irq_fail;
1923 		}
1924 	}
1925 
1926 	/* Start the controller */
1927 	gfar_start(ndev);
1928 
1929 	phy_start(priv->phydev);
1930 
1931 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
1932 
1933 	return 0;
1934 
1935 irq_fail:
1936 	free_skb_resources(priv);
1937 	return err;
1938 }
1939 
1940 /* Called when something needs to use the ethernet device
1941  * Returns 0 for success.
1942  */
1943 static int gfar_enet_open(struct net_device *dev)
1944 {
1945 	struct gfar_private *priv = netdev_priv(dev);
1946 	int err;
1947 
1948 	enable_napi(priv);
1949 
1950 	/* Initialize a bunch of registers */
1951 	init_registers(dev);
1952 
1953 	gfar_set_mac_address(dev);
1954 
1955 	err = init_phy(dev);
1956 
1957 	if (err) {
1958 		disable_napi(priv);
1959 		return err;
1960 	}
1961 
1962 	err = startup_gfar(dev);
1963 	if (err) {
1964 		disable_napi(priv);
1965 		return err;
1966 	}
1967 
1968 	netif_tx_start_all_queues(dev);
1969 
1970 	device_set_wakeup_enable(&dev->dev, priv->wol_en);
1971 
1972 	return err;
1973 }
1974 
1975 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1976 {
1977 	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1978 
1979 	memset(fcb, 0, GMAC_FCB_LEN);
1980 
1981 	return fcb;
1982 }
1983 
1984 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1985 				    int fcb_length)
1986 {
1987 	/* If we're here, it's an IP packet with a TCP or UDP
1988 	 * payload.  We set it to checksum, using a pseudo-header
1989 	 * we provide
1990 	 */
1991 	u8 flags = TXFCB_DEFAULT;
1992 
1993 	/* Tell the controller what the protocol is,
1994 	 * and provide the already-calculated phcs
1995 	 */
1996 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1997 		flags |= TXFCB_UDP;
1998 		fcb->phcs = udp_hdr(skb)->check;
1999 	} else
2000 		fcb->phcs = tcp_hdr(skb)->check;
2001 
2002 	/* l3os is the distance between the start of the
2003 	 * frame (skb->data) and the start of the IP hdr.
2004 	 * l4os is the distance between the start of the
2005 	 * l3 hdr and the l4 hdr
2006 	 */
2007 	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2008 	fcb->l4os = skb_network_header_len(skb);
2009 
2010 	fcb->flags = flags;
2011 }
2012 
2013 inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2014 {
2015 	fcb->flags |= TXFCB_VLN;
2016 	fcb->vlctl = vlan_tx_tag_get(skb);
2017 }
2018 
2019 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2020 				      struct txbd8 *base, int ring_size)
2021 {
2022 	struct txbd8 *new_bd = bdp + stride;
2023 
2024 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2025 }
2026 
2027 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2028 				      int ring_size)
2029 {
2030 	return skip_txbd(bdp, 1, base, ring_size);
2031 }
2032 
2033 /* This is called by the kernel when a frame is ready for transmission.
2034  * It is hooked up as the driver's ndo_start_xmit method.
2035  */
2036 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2037 {
2038 	struct gfar_private *priv = netdev_priv(dev);
2039 	struct gfar_priv_tx_q *tx_queue = NULL;
2040 	struct netdev_queue *txq;
2041 	struct gfar __iomem *regs = NULL;
2042 	struct txfcb *fcb = NULL;
2043 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2044 	u32 lstatus;
2045 	int i, rq = 0, do_tstamp = 0;
2046 	u32 bufaddr;
2047 	unsigned long flags;
2048 	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
2049 
2050 	/* TOE=1 frames larger than 2500 bytes may see excess delays
2051 	 * before start of transmission.
2052 	 */
2053 	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2054 		     skb->ip_summed == CHECKSUM_PARTIAL &&
2055 		     skb->len > 2500)) {
2056 		int ret;
2057 
2058 		ret = skb_checksum_help(skb);
2059 		if (ret)
2060 			return ret;
2061 	}
2062 
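	/* The skb's queue mapping selects the hw tx queue (BD ring) to use */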
2063 	rq = skb->queue_mapping;
2064 	tx_queue = priv->tx_queue[rq];
2065 	txq = netdev_get_tx_queue(dev, rq);
2066 	base = tx_queue->tx_bd_base;
2067 	regs = tx_queue->grp->regs;
2068 
2069 	/* check if time stamp should be generated */
2070 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2071 		     priv->hwts_tx_en)) {
2072 		do_tstamp = 1;
2073 		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2074 	}
2075 
2076 	/* make space for additional header when fcb is needed */
2077 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2078 	     vlan_tx_tag_present(skb) ||
2079 	     unlikely(do_tstamp)) &&
2080 	    (skb_headroom(skb) < fcb_length)) {
2081 		struct sk_buff *skb_new;
2082 
2083 		skb_new = skb_realloc_headroom(skb, fcb_length);
2084 		if (!skb_new) {
2085 			dev->stats.tx_errors++;
2086 			kfree_skb(skb);
2087 			return NETDEV_TX_OK;
2088 		}
2089 
2090 		if (skb->sk)
2091 			skb_set_owner_w(skb_new, skb->sk);
2092 		consume_skb(skb);
2093 		skb = skb_new;
2094 	}
2095 
2096 	/* total number of fragments in the SKB */
2097 	nr_frags = skb_shinfo(skb)->nr_frags;
2098 
2099 	/* calculate the required number of TxBDs for this skb */
2100 	if (unlikely(do_tstamp))
2101 		nr_txbds = nr_frags + 2;
2102 	else
2103 		nr_txbds = nr_frags + 1;
2104 
2105 	/* check if there is space to queue this packet */
2106 	if (nr_txbds > tx_queue->num_txbdfree) {
2107 		/* no space, stop the queue */
2108 		netif_tx_stop_queue(txq);
2109 		dev->stats.tx_fifo_errors++;
2110 		return NETDEV_TX_BUSY;
2111 	}
2112 
2113 	/* Update transmit stats */
2114 	tx_queue->stats.tx_bytes += skb->len;
2115 	tx_queue->stats.tx_packets++;
2116 
2117 	txbdp = txbdp_start = tx_queue->cur_tx;
2118 	lstatus = txbdp->lstatus;
2119 
2120 	/* Time stamp insertion requires one additional TxBD */
2121 	if (unlikely(do_tstamp))
2122 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2123 						 tx_queue->tx_ring_size);
2124 
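	/* For a linear skb the LAST/INTERRUPT flags go on its single data
	 * BD; otherwise they are set on the last fragment's BD below.
	 */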
2125 	if (nr_frags == 0) {
2126 		if (unlikely(do_tstamp))
2127 			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2128 							  TXBD_INTERRUPT);
2129 		else
2130 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2131 	} else {
2132 		/* Place the fragment addresses and lengths into the TxBDs */
2133 		for (i = 0; i < nr_frags; i++) {
2134 			/* Point at the next BD, wrapping as needed */
2135 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2136 
2137 			length = skb_shinfo(skb)->frags[i].size;
2138 
2139 			lstatus = txbdp->lstatus | length |
2140 				  BD_LFLAG(TXBD_READY);
2141 
2142 			/* Handle the last BD specially */
2143 			if (i == nr_frags - 1)
2144 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2145 
2146 			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
2147 						   &skb_shinfo(skb)->frags[i],
2148 						   0,
2149 						   length,
2150 						   DMA_TO_DEVICE);
2151 
2152 			/* set the TxBD length and buffer pointer */
2153 			txbdp->bufPtr = bufaddr;
2154 			txbdp->lstatus = lstatus;
2155 		}
2156 
2157 		lstatus = txbdp_start->lstatus;
2158 	}
2159 
2160 	/* Add TxPAL between FCB and frame if required */
2161 	if (unlikely(do_tstamp)) {
2162 		skb_push(skb, GMAC_TXPAL_LEN);
2163 		memset(skb->data, 0, GMAC_TXPAL_LEN);
2164 	}
2165 
2166 	/* Set up checksumming */
2167 	if (CHECKSUM_PARTIAL == skb->ip_summed) {
2168 		fcb = gfar_add_fcb(skb);
2169 		/* as specified by errata */
2170 		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
2171 			     ((unsigned long)fcb % 0x20) > 0x18)) {
2172 			__skb_pull(skb, GMAC_FCB_LEN);
2173 			skb_checksum_help(skb);
2174 		} else {
2175 			lstatus |= BD_LFLAG(TXBD_TOE);
2176 			gfar_tx_checksum(skb, fcb, fcb_length);
2177 		}
2178 	}
2179 
2180 	if (vlan_tx_tag_present(skb)) {
2181 		if (unlikely(NULL == fcb)) {
2182 			fcb = gfar_add_fcb(skb);
2183 			lstatus |= BD_LFLAG(TXBD_TOE);
2184 		}
2185 
2186 		gfar_tx_vlan(skb, fcb);
2187 	}
2188 
2189 	/* Setup tx hardware time stamping if requested */
2190 	if (unlikely(do_tstamp)) {
2191 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2192 		if (fcb == NULL)
2193 			fcb = gfar_add_fcb(skb);
2194 		fcb->ptp = 1;
2195 		lstatus |= BD_LFLAG(TXBD_TOE);
2196 	}
2197 
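	/* Map the linear part of the skb, including any FCB/TxPAL, for DMA */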
2198 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2199 					     skb_headlen(skb), DMA_TO_DEVICE);
2200 
2201 	/* If time stamping is requested one additional TxBD must be set up. The
2202 	 * first TxBD points to the FCB and must have a data length of
2203 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2204 	 * the full frame length.
2205 	 */
2206 	if (unlikely(do_tstamp)) {
2207 		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
2208 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2209 					 (skb_headlen(skb) - fcb_length);
2210 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2211 	} else {
2212 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2213 	}
2214 
2215 	netdev_tx_sent_queue(txq, skb->len);
2216 
2217 	/* We can work in parallel with gfar_clean_tx_ring(), except
2218 	 * when modifying num_txbdfree. Note that we didn't grab the lock
2219 	 * when we were reading the num_txbdfree and checking for available
2220 	 * space, that's because outside of this function it can only grow,
2221 	 * and once we've got needed space, it cannot suddenly disappear.
2222 	 *
2223 	 * The lock also protects us from gfar_error(), which can modify
2224 	 * regs->tstat and thus retrigger the transfers, which is why we
2225 	 * also must grab the lock before setting the ready bit for the
2226 	 * first BD to be transmitted.
2227 	 */
2228 	spin_lock_irqsave(&tx_queue->txlock, flags);
2229 
2230 	/* The powerpc-specific eieio() is used, as wmb() has too strong
2231 	 * semantics (it requires synchronization between cacheable and
2232 	 * uncacheable mappings, which eieio doesn't provide and which we
2233 	 * don't need), thus requiring a more expensive sync instruction.  At
2234 	 * some point, the set of architecture-independent barrier functions
2235 	 * should be expanded to include weaker barriers.
2236 	 */
2237 	eieio();
2238 
2239 	txbdp_start->lstatus = lstatus;
2240 
2241 	eieio(); /* force lstatus write before tx_skbuff */
2242 
2243 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2244 
2245 	/* Update the current skb pointer to the next entry we will use
2246 	 * (wrapping if necessary)
2247 	 */
2248 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2249 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2250 
2251 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2252 
2253 	/* reduce TxBD free count */
2254 	tx_queue->num_txbdfree -= (nr_txbds);
2255 
2256 	/* If the next BD still needs to be cleaned up, then the bds
2257 	 * are full.  We need to tell the kernel to stop sending us stuff.
2258 	 */
2259 	if (!tx_queue->num_txbdfree) {
2260 		netif_tx_stop_queue(txq);
2261 
2262 		dev->stats.tx_fifo_errors++;
2263 	}
2264 
2265 	/* Tell the DMA to go go go */
2266 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2267 
2268 	/* Unlock priv */
2269 	spin_unlock_irqrestore(&tx_queue->txlock, flags);
2270 
2271 	return NETDEV_TX_OK;
2272 }
2273 
2274 /* Stops the kernel queue, and halts the controller */
2275 static int gfar_close(struct net_device *dev)
2276 {
2277 	struct gfar_private *priv = netdev_priv(dev);
2278 
2279 	disable_napi(priv);
2280 
2281 	cancel_work_sync(&priv->reset_task);
2282 	stop_gfar(dev);
2283 
2284 	/* Disconnect from the PHY */
2285 	phy_disconnect(priv->phydev);
2286 	priv->phydev = NULL;
2287 
2288 	netif_tx_stop_all_queues(dev);
2289 
2290 	return 0;
2291 }
2292 
2293 /* Changes the mac address if the controller is not running. */
2294 static int gfar_set_mac_address(struct net_device *dev)
2295 {
2296 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2297 
2298 	return 0;
2299 }
2300 
2301 /* Check if rx parser should be activated */
2302 void gfar_check_rx_parser_mode(struct gfar_private *priv)
2303 {
2304 	struct gfar __iomem *regs;
2305 	u32 tempval;
2306 
2307 	regs = priv->gfargrp[0].regs;
2308 
2309 	tempval = gfar_read(&regs->rctrl);
2310 	/* Enable the parser if any feature still requires it, else disable it */
2311 	if (tempval & RCTRL_REQ_PARSER)
2312 		tempval |= RCTRL_PRSDEP_INIT;
2313 	else
2314 		tempval &= ~RCTRL_PRSDEP_INIT;
2315 	gfar_write(&regs->rctrl, tempval);
2316 }
2317 
2318 /* Enables and disables VLAN insertion/extraction */
2319 void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2320 {
2321 	struct gfar_private *priv = netdev_priv(dev);
2322 	struct gfar __iomem *regs = NULL;
2323 	unsigned long flags;
2324 	u32 tempval;
2325 
2326 	regs = priv->gfargrp[0].regs;
2327 	local_irq_save(flags);
2328 	lock_rx_qs(priv);
2329 
2330 	if (features & NETIF_F_HW_VLAN_TX) {
2331 		/* Enable VLAN tag insertion */
2332 		tempval = gfar_read(&regs->tctrl);
2333 		tempval |= TCTRL_VLINS;
2334 		gfar_write(&regs->tctrl, tempval);
2335 	} else {
2336 		/* Disable VLAN tag insertion */
2337 		tempval = gfar_read(&regs->tctrl);
2338 		tempval &= ~TCTRL_VLINS;
2339 		gfar_write(&regs->tctrl, tempval);
2340 	}
2341 
2342 	if (features & NETIF_F_HW_VLAN_RX) {
2343 		/* Enable VLAN tag extraction */
2344 		tempval = gfar_read(&regs->rctrl);
2345 		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2346 		gfar_write(&regs->rctrl, tempval);
2347 	} else {
2348 		/* Disable VLAN tag extraction */
2349 		tempval = gfar_read(&regs->rctrl);
2350 		tempval &= ~RCTRL_VLEX;
2351 		gfar_write(&regs->rctrl, tempval);
2352 
2353 		gfar_check_rx_parser_mode(priv);
2354 	}
2355 
2356 	gfar_change_mtu(dev, dev->mtu);
2357 
2358 	unlock_rx_qs(priv);
2359 	local_irq_restore(flags);
2360 }
2361 
2362 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2363 {
2364 	int tempsize, tempval;
2365 	struct gfar_private *priv = netdev_priv(dev);
2366 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
2367 	int oldsize = priv->rx_buffer_size;
2368 	int frame_size = new_mtu + ETH_HLEN;
2369 
2370 	if (gfar_is_vlan_on(priv))
2371 		frame_size += VLAN_HLEN;
2372 
2373 	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2374 		netif_err(priv, drv, dev, "Invalid MTU setting\n");
2375 		return -EINVAL;
2376 	}
2377 
2378 	if (gfar_uses_fcb(priv))
2379 		frame_size += GMAC_FCB_LEN;
2380 
2381 	frame_size += priv->padding;
2382 
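	/* Round the rx buffer size up to the next
	 * INCREMENTAL_BUFFER_SIZE boundary
	 */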
2383 	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2384 		   INCREMENTAL_BUFFER_SIZE;
2385 
2386 	/* Only stop and start the controller if it isn't already
2387 	 * stopped, and we changed something
2388 	 */
2389 	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2390 		stop_gfar(dev);
2391 
2392 	priv->rx_buffer_size = tempsize;
2393 
2394 	dev->mtu = new_mtu;
2395 
2396 	gfar_write(&regs->mrblr, priv->rx_buffer_size);
2397 	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
2398 
2399 	/* If the mtu is larger than the max size for standard
2400 	 * ethernet frames (ie, a jumbo frame), then set maccfg2
2401 	 * to allow huge frames, and to check the length
2402 	 */
2403 	tempval = gfar_read(&regs->maccfg2);
2404 
2405 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2406 	    gfar_has_errata(priv, GFAR_ERRATA_74))
2407 		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2408 	else
2409 		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2410 
2411 	gfar_write(&regs->maccfg2, tempval);
2412 
2413 	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2414 		startup_gfar(dev);
2415 
2416 	return 0;
2417 }
2418 
2419 /* gfar_reset_task gets scheduled when a packet has not been
2420  * transmitted after a set amount of time.
2421  * For now, assume that clearing out all the structures, and
2422  * starting over will fix the problem.
2423  */
2424 static void gfar_reset_task(struct work_struct *work)
2425 {
2426 	struct gfar_private *priv = container_of(work, struct gfar_private,
2427 						 reset_task);
2428 	struct net_device *dev = priv->ndev;
2429 
2430 	if (dev->flags & IFF_UP) {
2431 		netif_tx_stop_all_queues(dev);
2432 		stop_gfar(dev);
2433 		startup_gfar(dev);
2434 		netif_tx_start_all_queues(dev);
2435 	}
2436 
2437 	netif_tx_schedule_all(dev);
2438 }
2439 
2440 static void gfar_timeout(struct net_device *dev)
2441 {
2442 	struct gfar_private *priv = netdev_priv(dev);
2443 
2444 	dev->stats.tx_errors++;
2445 	schedule_work(&priv->reset_task);
2446 }
2447 
2448 static void gfar_align_skb(struct sk_buff *skb)
2449 {
2450 	/* We need the data buffer to be aligned properly.  We will reserve
2451 	 * as many bytes as needed to align the data properly
2452 	 */
2453 	skb_reserve(skb, RXBUF_ALIGNMENT -
2454 		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2455 }
2456 
2457 /* Interrupt Handler for Transmit complete */
2458 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2459 {
2460 	struct net_device *dev = tx_queue->dev;
2461 	struct netdev_queue *txq;
2462 	struct gfar_private *priv = netdev_priv(dev);
2463 	struct gfar_priv_rx_q *rx_queue = NULL;
2464 	struct txbd8 *bdp, *next = NULL;
2465 	struct txbd8 *lbdp = NULL;
2466 	struct txbd8 *base = tx_queue->tx_bd_base;
2467 	struct sk_buff *skb;
2468 	int skb_dirtytx;
2469 	int tx_ring_size = tx_queue->tx_ring_size;
2470 	int frags = 0, nr_txbds = 0;
2471 	int i;
2472 	int howmany = 0;
2473 	int tqi = tx_queue->qindex;
2474 	unsigned int bytes_sent = 0;
2475 	u32 lstatus;
2476 	size_t buflen;
2477 
2478 	rx_queue = priv->rx_queue[tqi];
2479 	txq = netdev_get_tx_queue(dev, tqi);
2480 	bdp = tx_queue->dirty_tx;
2481 	skb_dirtytx = tx_queue->skb_dirtytx;
2482 
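	/* Walk the ring from the oldest outstanding skb, cleaning up each
	 * frame the controller has finished transmitting.
	 */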
2483 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2484 		unsigned long flags;
2485 
2486 		frags = skb_shinfo(skb)->nr_frags;
2487 
2488 		/* When time stamping, one additional TxBD must be freed.
2489 		 * Also, we need to dma_unmap_single() the TxPAL.
2490 		 */
2491 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2492 			nr_txbds = frags + 2;
2493 		else
2494 			nr_txbds = frags + 1;
2495 
2496 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2497 
2498 		lstatus = lbdp->lstatus;
2499 
2500 		/* Only clean completed frames */
2501 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2502 		    (lstatus & BD_LENGTH_MASK))
2503 			break;
2504 
2505 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2506 			next = next_txbd(bdp, base, tx_ring_size);
2507 			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2508 		} else
2509 			buflen = bdp->length;
2510 
2511 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2512 				 buflen, DMA_TO_DEVICE);
2513 
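		/* Extract the hw Tx timestamp stored ahead of the frame data
		 * and report it via skb_tstamp_tx().
		 */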
2514 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2515 			struct skb_shared_hwtstamps shhwtstamps;
2516 			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
2517 
2518 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2519 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2520 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2521 			skb_tstamp_tx(skb, &shhwtstamps);
2522 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2523 			bdp = next;
2524 		}
2525 
2526 		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2527 		bdp = next_txbd(bdp, base, tx_ring_size);
2528 
2529 		for (i = 0; i < frags; i++) {
2530 			dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
2531 				       bdp->length, DMA_TO_DEVICE);
2532 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2533 			bdp = next_txbd(bdp, base, tx_ring_size);
2534 		}
2535 
2536 		bytes_sent += skb->len;
2537 
2538 		dev_kfree_skb_any(skb);
2539 
2540 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2541 
2542 		skb_dirtytx = (skb_dirtytx + 1) &
2543 			      TX_RING_MOD_MASK(tx_ring_size);
2544 
2545 		howmany++;
2546 		spin_lock_irqsave(&tx_queue->txlock, flags);
2547 		tx_queue->num_txbdfree += nr_txbds;
2548 		spin_unlock_irqrestore(&tx_queue->txlock, flags);
2549 	}
2550 
2551 	/* If we freed a buffer, we can restart transmission, if necessary */
2552 	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
2553 		netif_wake_subqueue(dev, tqi);
2554 
2555 	/* Update dirty indicators */
2556 	tx_queue->skb_dirtytx = skb_dirtytx;
2557 	tx_queue->dirty_tx = bdp;
2558 
2559 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2560 
2561 	return howmany;
2562 }
2563 
2564 static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2565 {
2566 	unsigned long flags;
2567 
2568 	spin_lock_irqsave(&gfargrp->grplock, flags);
2569 	if (napi_schedule_prep(&gfargrp->napi)) {
2570 		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2571 		__napi_schedule(&gfargrp->napi);
2572 	} else {
2573 		/* Clear IEVENT, so interrupts aren't called again
2574 		 * because of the packets that have already arrived.
2575 		 */
2576 		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2577 	}
2578 	spin_unlock_irqrestore(&gfargrp->grplock, flags);
2580 }
2581 
2582 /* Interrupt Handler for Transmit complete */
2583 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2584 {
2585 	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2586 	return IRQ_HANDLED;
2587 }
2588 
2589 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2590 			   struct sk_buff *skb)
2591 {
2592 	struct net_device *dev = rx_queue->dev;
2593 	struct gfar_private *priv = netdev_priv(dev);
2594 	dma_addr_t buf;
2595 
2596 	buf = dma_map_single(&priv->ofdev->dev, skb->data,
2597 			     priv->rx_buffer_size, DMA_FROM_DEVICE);
2598 	gfar_init_rxbdp(rx_queue, bdp, buf);
2599 }
2600 
2601 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2602 {
2603 	struct gfar_private *priv = netdev_priv(dev);
2604 	struct sk_buff *skb;
2605 
2606 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2607 	if (!skb)
2608 		return NULL;
2609 
2610 	gfar_align_skb(skb);
2611 
2612 	return skb;
2613 }
2614 
2615 struct sk_buff *gfar_new_skb(struct net_device *dev)
2616 {
2617 	return gfar_alloc_skb(dev);
2618 }
2619 
2620 static inline void count_errors(unsigned short status, struct net_device *dev)
2621 {
2622 	struct gfar_private *priv = netdev_priv(dev);
2623 	struct net_device_stats *stats = &dev->stats;
2624 	struct gfar_extra_stats *estats = &priv->extra_stats;
2625 
2626 	/* If the packet was truncated, none of the other errors matter */
2627 	if (status & RXBD_TRUNCATED) {
2628 		stats->rx_length_errors++;
2629 
2630 		estats->rx_trunc++;
2631 
2632 		return;
2633 	}
2634 	/* Count the errors, if there were any */
2635 	if (status & (RXBD_LARGE | RXBD_SHORT)) {
2636 		stats->rx_length_errors++;
2637 
2638 		if (status & RXBD_LARGE)
2639 			estats->rx_large++;
2640 		else
2641 			estats->rx_short++;
2642 	}
2643 	if (status & RXBD_NONOCTET) {
2644 		stats->rx_frame_errors++;
2645 		estats->rx_nonoctet++;
2646 	}
2647 	if (status & RXBD_CRCERR) {
2648 		estats->rx_crcerr++;
2649 		stats->rx_crc_errors++;
2650 	}
2651 	if (status & RXBD_OVERRUN) {
2652 		estats->rx_overrun++;
2653 		stats->rx_crc_errors++;
2654 	}
2655 }
2656 
2657 irqreturn_t gfar_receive(int irq, void *grp_id)
2658 {
2659 	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2660 	return IRQ_HANDLED;
2661 }
2662 
2663 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2664 {
2665 	/* If valid headers were found, and valid sums were verified,
2666 	 * then we tell the kernel that no checksumming is necessary.
2667 	 * Otherwise, leave it to the stack to verify the checksum.
2668 	 */
2669 	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2670 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2671 	else
2672 		skb_checksum_none_assert(skb);
2673 }
2674 
2675 
2676 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2677 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2678 			      int amount_pull, struct napi_struct *napi)
2679 {
2680 	struct gfar_private *priv = netdev_priv(dev);
2681 	struct rxfcb *fcb = NULL;
2682 
2683 	gro_result_t ret;
2684 
2685 	/* The FCB, if present, is at the beginning of the frame */
2686 	fcb = (struct rxfcb *)skb->data;
2687 
2688 	/* Remove the FCB from the skb
2689 	 * Remove the padded bytes, if there are any
2690 	 */
2691 	if (amount_pull) {
2692 		skb_record_rx_queue(skb, fcb->rq);
2693 		skb_pull(skb, amount_pull);
2694 	}
2695 
2696 	/* Get receive timestamp from the skb */
2697 	if (priv->hwts_rx_en) {
2698 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2699 		u64 *ns = (u64 *) skb->data;
2700 
2701 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2702 		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2703 	}
2704 
2705 	if (priv->padding)
2706 		skb_pull(skb, priv->padding);
2707 
2708 	if (dev->features & NETIF_F_RXCSUM)
2709 		gfar_rx_checksum(skb, fcb);
2710 
2711 	/* Tell the skb what kind of packet this is */
2712 	skb->protocol = eth_type_trans(skb, dev);
2713 
2714 	/* There's a need to check for NETIF_F_HW_VLAN_RX here.
2715 	 * Even if vlan rx accel is disabled, on some chips
2716 	 * RXFCB_VLN is pseudo-randomly set.
2717 	 */
2718 	if (dev->features & NETIF_F_HW_VLAN_RX &&
2719 	    fcb->flags & RXFCB_VLN)
2720 		__vlan_hwaccel_put_tag(skb, fcb->vlctl);
2721 
2722 	/* Send the packet up the stack */
2723 	ret = napi_gro_receive(napi, skb);
2724 
2725 	if (GRO_DROP == ret)
2726 		priv->extra_stats.kernel_dropped++;
2727 
2728 	return 0;
2729 }
2730 
2731 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2732  * until the budget/quota has been reached. Returns the number
2733  * of frames handled
2734  */
2735 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2736 {
2737 	struct net_device *dev = rx_queue->dev;
2738 	struct rxbd8 *bdp, *base;
2739 	struct sk_buff *skb;
2740 	int pkt_len;
2741 	int amount_pull;
2742 	int howmany = 0;
2743 	struct gfar_private *priv = netdev_priv(dev);
2744 
2745 	/* Get the first full descriptor */
2746 	bdp = rx_queue->cur_rx;
2747 	base = rx_queue->rx_bd_base;
2748 
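	/* If the controller prepends a frame control block (FCB), it has
	 * to be pulled off before the frame is handed to the stack.
	 */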
2749 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
2750 
2751 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2752 		struct sk_buff *newskb;
2753 
2754 		rmb();
2755 
2756 		/* Add another skb for the future */
2757 		newskb = gfar_new_skb(dev);
2758 
2759 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2760 
2761 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2762 				 priv->rx_buffer_size, DMA_FROM_DEVICE);
2763 
2764 		if (unlikely(!(bdp->status & RXBD_ERR) &&
2765 			     bdp->length > priv->rx_buffer_size))
2766 			bdp->status = RXBD_LARGE;
2767 
2768 		/* We drop the frame if we failed to allocate a new buffer */
2769 		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2770 			     bdp->status & RXBD_ERR)) {
2771 			count_errors(bdp->status, dev);
2772 
2773 			if (unlikely(!newskb))
2774 				newskb = skb;
2775 			else if (skb)
2776 				dev_kfree_skb(skb);
2777 		} else {
2778 			/* Increment the number of packets */
2779 			rx_queue->stats.rx_packets++;
2780 			howmany++;
2781 
2782 			if (likely(skb)) {
2783 				/* Remove the FCS from the packet length */
2784 				pkt_len = bdp->length - ETH_FCS_LEN;
2785 				skb_put(skb, pkt_len);
2786 				rx_queue->stats.rx_bytes += pkt_len;
2787 				skb_record_rx_queue(skb, rx_queue->qindex);
2788 				gfar_process_frame(dev, skb, amount_pull,
2789 						   &rx_queue->grp->napi);
2790 
2791 			} else {
2792 				netif_warn(priv, rx_err, dev, "Missing skb!\n");
2793 				rx_queue->stats.rx_dropped++;
2794 				priv->extra_stats.rx_skbmissing++;
2795 			}
2797 		}
2798 
2799 		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2800 
2801 		/* Setup the new bdp */
2802 		gfar_new_rxbdp(rx_queue, bdp, newskb);
2803 
2804 		/* Update to the next pointer */
2805 		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2806 
2807 		/* update to point at the next skb */
2808 		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2809 				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2810 	}
2811 
2812 	/* Update the current rxbd pointer to be the next one */
2813 	rx_queue->cur_rx = bdp;
2814 
2815 	return howmany;
2816 }
2817 
2818 static int gfar_poll(struct napi_struct *napi, int budget)
2819 {
2820 	struct gfar_priv_grp *gfargrp =
2821 		container_of(napi, struct gfar_priv_grp, napi);
2822 	struct gfar_private *priv = gfargrp->priv;
2823 	struct gfar __iomem *regs = gfargrp->regs;
2824 	struct gfar_priv_tx_q *tx_queue = NULL;
2825 	struct gfar_priv_rx_q *rx_queue = NULL;
2826 	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2827 	int tx_cleaned = 0, i, left_over_budget = budget;
2828 	unsigned long serviced_queues = 0;
2829 	int num_queues = 0;
2830 
2831 	num_queues = gfargrp->num_rx_queues;
2832 	budget_per_queue = budget/num_queues;
2833 
2834 	/* Clear IEVENT, so interrupts aren't called again
2835 	 * because of the packets that have already arrived
2836 	 */
2837 	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2838 
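	/* Split the budget across this group's rx queues; budget a queue
	 * leaves unused gets redistributed to the queues still pending.
	 */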
2839 	while (num_queues && left_over_budget) {
2840 		budget_per_queue = left_over_budget/num_queues;
2841 		left_over_budget = 0;
2842 
2843 		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2844 			if (test_bit(i, &serviced_queues))
2845 				continue;
2846 			rx_queue = priv->rx_queue[i];
2847 			tx_queue = priv->tx_queue[rx_queue->qindex];
2848 
2849 			tx_cleaned += gfar_clean_tx_ring(tx_queue);
2850 			rx_cleaned_per_queue =
2851 				gfar_clean_rx_ring(rx_queue, budget_per_queue);
2852 			rx_cleaned += rx_cleaned_per_queue;
2853 			if (rx_cleaned_per_queue < budget_per_queue) {
2854 				left_over_budget = left_over_budget +
2855 					(budget_per_queue -
2856 					 rx_cleaned_per_queue);
2857 				set_bit(i, &serviced_queues);
2858 				num_queues--;
2859 			}
2860 		}
2861 	}
2862 
2863 	if (tx_cleaned)
2864 		return budget;
2865 
2866 	if (rx_cleaned < budget) {
2867 		napi_complete(napi);
2868 
2869 		/* Clear the halt bit in RSTAT */
2870 		gfar_write(&regs->rstat, gfargrp->rstat);
2871 
2872 		gfar_write(&regs->imask, IMASK_DEFAULT);
2873 
2874 		/* If we are coalescing interrupts, update the timer
2875 		 * Otherwise, clear it
2876 		 */
2877 		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2878 					  gfargrp->tx_bit_map);
2879 	}
2880 
2881 	return rx_cleaned;
2882 }
2883 
2884 #ifdef CONFIG_NET_POLL_CONTROLLER
2885 /* Polling 'interrupt' - used by things like netconsole to send skbs
2886  * without having to re-enable interrupts. It's not called while
2887  * the interrupt routine is executing.
2888  */
2889 static void gfar_netpoll(struct net_device *dev)
2890 {
2891 	struct gfar_private *priv = netdev_priv(dev);
2892 	int i;
2893 
2894 	/* If the device has multiple interrupts, run tx/rx */
2895 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2896 		for (i = 0; i < priv->num_grps; i++) {
2897 			disable_irq(priv->gfargrp[i].interruptTransmit);
2898 			disable_irq(priv->gfargrp[i].interruptReceive);
2899 			disable_irq(priv->gfargrp[i].interruptError);
2900 			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2901 				       &priv->gfargrp[i]);
2902 			enable_irq(priv->gfargrp[i].interruptError);
2903 			enable_irq(priv->gfargrp[i].interruptReceive);
2904 			enable_irq(priv->gfargrp[i].interruptTransmit);
2905 		}
2906 	} else {
2907 		for (i = 0; i < priv->num_grps; i++) {
2908 			disable_irq(priv->gfargrp[i].interruptTransmit);
2909 			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2910 				       &priv->gfargrp[i]);
2911 			enable_irq(priv->gfargrp[i].interruptTransmit);
2912 		}
2913 	}
2914 }
2915 #endif
2916 
2917 /* The interrupt handler for devices with one interrupt */
2918 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2919 {
2920 	struct gfar_priv_grp *gfargrp = grp_id;
2921 
2922 	/* Save ievent for future reference */
2923 	u32 events = gfar_read(&gfargrp->regs->ievent);
2924 
2925 	/* Check for reception */
2926 	if (events & IEVENT_RX_MASK)
2927 		gfar_receive(irq, grp_id);
2928 
2929 	/* Check for transmit completion */
2930 	if (events & IEVENT_TX_MASK)
2931 		gfar_transmit(irq, grp_id);
2932 
2933 	/* Check for errors */
2934 	if (events & IEVENT_ERR_MASK)
2935 		gfar_error(irq, grp_id);
2936 
2937 	return IRQ_HANDLED;
2938 }
2939 
2940 /* Called every time the controller might need to be made
2941  * aware of new link state.  The PHY code conveys this
2942  * information through variables in the phydev structure, and this
2943  * function converts those variables into the appropriate
2944  * register values, and can bring down the device if needed.
2945  */
2946 static void adjust_link(struct net_device *dev)
2947 {
2948 	struct gfar_private *priv = netdev_priv(dev);
2949 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
2950 	unsigned long flags;
2951 	struct phy_device *phydev = priv->phydev;
2952 	int new_state = 0;
2953 
2954 	local_irq_save(flags);
2955 	lock_tx_qs(priv);
2956 
2957 	if (phydev->link) {
2958 		u32 tempval = gfar_read(&regs->maccfg2);
2959 		u32 ecntrl = gfar_read(&regs->ecntrl);
2960 
2961 		/* Now we make sure that we can be in full duplex mode.
2962 		 * If not, we operate in half-duplex mode.
2963 		 */
2964 		if (phydev->duplex != priv->oldduplex) {
2965 			new_state = 1;
2966 			if (!(phydev->duplex))
2967 				tempval &= ~(MACCFG2_FULL_DUPLEX);
2968 			else
2969 				tempval |= MACCFG2_FULL_DUPLEX;
2970 
2971 			priv->oldduplex = phydev->duplex;
2972 		}
2973 
2974 		if (phydev->speed != priv->oldspeed) {
2975 			new_state = 1;
2976 			switch (phydev->speed) {
2977 			case 1000:
2978 				tempval =
2979 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2980 
2981 				ecntrl &= ~(ECNTRL_R100);
2982 				break;
2983 			case 100:
2984 			case 10:
2985 				tempval =
2986 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2987 
2988 				/* Reduced mode distinguishes
2989 				 * between 10 and 100
2990 				 */
2991 				if (phydev->speed == SPEED_100)
2992 					ecntrl |= ECNTRL_R100;
2993 				else
2994 					ecntrl &= ~(ECNTRL_R100);
2995 				break;
2996 			default:
2997 				netif_warn(priv, link, dev,
2998 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
2999 					   phydev->speed);
3000 				break;
3001 			}
3002 
3003 			priv->oldspeed = phydev->speed;
3004 		}
3005 
3006 		gfar_write(&regs->maccfg2, tempval);
3007 		gfar_write(&regs->ecntrl, ecntrl);
3008 
3009 		if (!priv->oldlink) {
3010 			new_state = 1;
3011 			priv->oldlink = 1;
3012 		}
3013 	} else if (priv->oldlink) {
3014 		new_state = 1;
3015 		priv->oldlink = 0;
3016 		priv->oldspeed = 0;
3017 		priv->oldduplex = -1;
3018 	}
3019 
3020 	if (new_state && netif_msg_link(priv))
3021 		phy_print_status(phydev);
3022 	unlock_tx_qs(priv);
3023 	local_irq_restore(flags);
3024 }
3025 
3026 /* Update the hash table based on the current list of multicast
3027  * addresses we subscribe to.  Also, change the promiscuity of
3028  * the device based on the flags (this function is called
3029  * whenever dev->flags is changed).
3030  */
3031 static void gfar_set_multi(struct net_device *dev)
3032 {
3033 	struct netdev_hw_addr *ha;
3034 	struct gfar_private *priv = netdev_priv(dev);
3035 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3036 	u32 tempval;
3037 
3038 	if (dev->flags & IFF_PROMISC) {
3039 		/* Set RCTRL to PROM */
3040 		tempval = gfar_read(&regs->rctrl);
3041 		tempval |= RCTRL_PROM;
3042 		gfar_write(&regs->rctrl, tempval);
3043 	} else {
3044 		/* Set RCTRL to not PROM */
3045 		tempval = gfar_read(&regs->rctrl);
3046 		tempval &= ~(RCTRL_PROM);
3047 		gfar_write(&regs->rctrl, tempval);
3048 	}
3049 
3050 	if (dev->flags & IFF_ALLMULTI) {
3051 		/* Set the hash to rx all multicast frames */
3052 		gfar_write(&regs->igaddr0, 0xffffffff);
3053 		gfar_write(&regs->igaddr1, 0xffffffff);
3054 		gfar_write(&regs->igaddr2, 0xffffffff);
3055 		gfar_write(&regs->igaddr3, 0xffffffff);
3056 		gfar_write(&regs->igaddr4, 0xffffffff);
3057 		gfar_write(&regs->igaddr5, 0xffffffff);
3058 		gfar_write(&regs->igaddr6, 0xffffffff);
3059 		gfar_write(&regs->igaddr7, 0xffffffff);
3060 		gfar_write(&regs->gaddr0, 0xffffffff);
3061 		gfar_write(&regs->gaddr1, 0xffffffff);
3062 		gfar_write(&regs->gaddr2, 0xffffffff);
3063 		gfar_write(&regs->gaddr3, 0xffffffff);
3064 		gfar_write(&regs->gaddr4, 0xffffffff);
3065 		gfar_write(&regs->gaddr5, 0xffffffff);
3066 		gfar_write(&regs->gaddr6, 0xffffffff);
3067 		gfar_write(&regs->gaddr7, 0xffffffff);
3068 	} else {
3069 		int em_num;
3070 		int idx;
3071 
3072 		/* zero out the hash */
3073 		gfar_write(&regs->igaddr0, 0x0);
3074 		gfar_write(&regs->igaddr1, 0x0);
3075 		gfar_write(&regs->igaddr2, 0x0);
3076 		gfar_write(&regs->igaddr3, 0x0);
3077 		gfar_write(&regs->igaddr4, 0x0);
3078 		gfar_write(&regs->igaddr5, 0x0);
3079 		gfar_write(&regs->igaddr6, 0x0);
3080 		gfar_write(&regs->igaddr7, 0x0);
3081 		gfar_write(&regs->gaddr0, 0x0);
3082 		gfar_write(&regs->gaddr1, 0x0);
3083 		gfar_write(&regs->gaddr2, 0x0);
3084 		gfar_write(&regs->gaddr3, 0x0);
3085 		gfar_write(&regs->gaddr4, 0x0);
3086 		gfar_write(&regs->gaddr5, 0x0);
3087 		gfar_write(&regs->gaddr6, 0x0);
3088 		gfar_write(&regs->gaddr7, 0x0);
3089 
3090 		/* If we have extended hash tables, we need to
3091 		 * clear the exact match registers to prepare for
3092 		 * setting them
3093 		 */
3094 		if (priv->extended_hash) {
3095 			em_num = GFAR_EM_NUM + 1;
3096 			gfar_clear_exact_match(dev);
3097 			idx = 1;
3098 		} else {
3099 			idx = 0;
3100 			em_num = 0;
3101 		}
3102 
3103 		if (netdev_mc_empty(dev))
3104 			return;
3105 
3106 		/* Parse the list, and set the appropriate bits */
3107 		netdev_for_each_mc_addr(ha, dev) {
3108 			if (idx < em_num) {
3109 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3110 				idx++;
3111 			} else
3112 				gfar_set_hash_for_addr(dev, ha->addr);
3113 		}
3114 	}
3115 }
3116 
3117 
3118 /* Clears each of the exact match registers to zero, so they
3119  * don't interfere with normal reception
3120  */
3121 static void gfar_clear_exact_match(struct net_device *dev)
3122 {
3123 	int idx;
3124 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3125 
3126 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3127 		gfar_set_mac_for_addr(dev, idx, zero_arr);
3128 }
3129 
3130 /* Set the appropriate hash bit for the given addr.
3131  * The algorithm works like so:
3132  * 1) Take the Destination Address (i.e. the multicast address), and
3133  * do a CRC on it (little endian), and reverse the bits of the
3134  * result.
3135  * 2) Use the 8 most significant bits as a hash into a 256-entry
3136  * table.  The table is controlled through 8 32-bit registers:
3137  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3138  * entry 255.  This means that the 3 most significant bits of the
3139  * hash index indicate which gaddr register to use, and the other
3140  * 5 bits indicate which bit (assuming an IBM numbering scheme,
3141  * which for PowerPC (tm) is usually the case) in the register
3142  * holds the entry.
3143  */
3144 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3145 {
3146 	u32 tempval;
3147 	struct gfar_private *priv = netdev_priv(dev);
3148 	u32 result = ether_crc(ETH_ALEN, addr);
3149 	int width = priv->hash_width;
3150 	u8 whichbit = (result >> (32 - width)) & 0x1f;
3151 	u8 whichreg = result >> (32 - width + 5);
3152 	u32 value = (1 << (31-whichbit));
3153 
3154 	tempval = gfar_read(priv->hash_regs[whichreg]);
3155 	tempval |= value;
3156 	gfar_write(priv->hash_regs[whichreg], tempval);
3157 }
3158 
3159 
3160 /* There are multiple MAC Address register pairs on some controllers.
3161  * This function sets the num'th pair to a given address.
3162  */
3163 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3164 				  const u8 *addr)
3165 {
3166 	struct gfar_private *priv = netdev_priv(dev);
3167 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3168 	int idx;
3169 	char tmpbuf[ETH_ALEN];
3170 	u32 tempval;
3171 	u32 __iomem *macptr = &regs->macstnaddr1;
3172 
3173 	macptr += num*2;
3174 
3175 	/* Now copy it into the mac registers backwards, since the
3176 	 * hardware expects the address bytes in reverse order
3177 	 */
3178 	for (idx = 0; idx < ETH_ALEN; idx++)
3179 		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3180 
3181 	gfar_write(macptr, *((u32 *) (tmpbuf)));
3182 
3183 	tempval = *((u32 *) (tmpbuf + 4));
3184 
3185 	gfar_write(macptr+1, tempval);
3186 }
3187 
3188 /* GFAR error interrupt handler */
3189 static irqreturn_t gfar_error(int irq, void *grp_id)
3190 {
3191 	struct gfar_priv_grp *gfargrp = grp_id;
3192 	struct gfar __iomem *regs = gfargrp->regs;
3193 	struct gfar_private *priv = gfargrp->priv;
3194 	struct net_device *dev = priv->ndev;
3195 
3196 	/* Save ievent for future reference */
3197 	u32 events = gfar_read(&regs->ievent);
3198 
3199 	/* Clear IEVENT */
3200 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3201 
3202 	/* Magic Packet is not an error. */
3203 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3204 	    (events & IEVENT_MAG))
3205 		events &= ~IEVENT_MAG;
3206 
3207 	/* Log the error details if rx/tx error messaging is enabled */
3208 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3209 		netdev_dbg(dev,
3210 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3211 			   events, gfar_read(&regs->imask));
3212 
3213 	/* Update the error counters */
3214 	if (events & IEVENT_TXE) {
3215 		dev->stats.tx_errors++;
3216 
3217 		if (events & IEVENT_LC)
3218 			dev->stats.tx_window_errors++;
3219 		if (events & IEVENT_CRL)
3220 			dev->stats.tx_aborted_errors++;
3221 		if (events & IEVENT_XFUN) {
3222 			unsigned long flags;
3223 
3224 			netif_dbg(priv, tx_err, dev,
3225 				  "TX FIFO underrun, packet dropped\n");
3226 			dev->stats.tx_dropped++;
3227 			priv->extra_stats.tx_underrun++;
3228 
3229 			local_irq_save(flags);
3230 			lock_tx_qs(priv);
3231 
3232 			/* Reactivate the Tx Queues */
3233 			gfar_write(&regs->tstat, gfargrp->tstat);
3234 
3235 			unlock_tx_qs(priv);
3236 			local_irq_restore(flags);
3237 		}
3238 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3239 	}
3240 	if (events & IEVENT_BSY) {
3241 		dev->stats.rx_errors++;
3242 		priv->extra_stats.rx_bsy++;
3243 
3244 		gfar_receive(irq, grp_id);
3245 
3246 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3247 			  gfar_read(&regs->rstat));
3248 	}
3249 	if (events & IEVENT_BABR) {
3250 		dev->stats.rx_errors++;
3251 		priv->extra_stats.rx_babr++;
3252 
3253 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3254 	}
3255 	if (events & IEVENT_EBERR) {
3256 		priv->extra_stats.eberr++;
3257 		netif_dbg(priv, rx_err, dev, "bus error\n");
3258 	}
3259 	if (events & IEVENT_RXC)
3260 		netif_dbg(priv, rx_status, dev, "control frame\n");
3261 
3262 	if (events & IEVENT_BABT) {
3263 		priv->extra_stats.tx_babt++;
3264 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3265 	}
3266 	return IRQ_HANDLED;
3267 }
3268 
3269 static struct of_device_id gfar_match[] = {
3271 	{
3272 		.type = "network",
3273 		.compatible = "gianfar",
3274 	},
3275 	{
3276 		.compatible = "fsl,etsec2",
3277 	},
3278 	{},
3279 };
3280 MODULE_DEVICE_TABLE(of, gfar_match);
3281 
3282 /* Structure for a device driver */
3283 static struct platform_driver gfar_driver = {
3284 	.driver = {
3285 		.name = "fsl-gianfar",
3286 		.owner = THIS_MODULE,
3287 		.pm = GFAR_PM_OPS,
3288 		.of_match_table = gfar_match,
3289 	},
3290 	.probe = gfar_probe,
3291 	.remove = gfar_remove,
3292 };
3293 
3294 module_platform_driver(gfar_driver);
3295