1 /* drivers/net/ethernet/freescale/gianfar.c
2  *
3  * Gianfar Ethernet Driver
4  * This driver is designed for the non-CPM ethernet controllers
5  * on the 85xx and 83xx family of integrated processors
6  * Based on 8260_io/fcc_enet.c
7  *
8  * Author: Andy Fleming
9  * Maintainer: Kumar Gala
10  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11  *
12  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13  * Copyright 2007 MontaVista Software, Inc.
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  *  Gianfar:  AKA Lambda Draconis, "Dragon"
21  *  RA 11 31 24.2
22  *  Dec +69 19 52
23  *  V 3.84
24  *  B-V +1.62
25  *
26  *  Theory of operation
27  *
28  *  The driver is initialized through of_device. Configuration information
29  *  is therefore conveyed through an OF-style device tree.
30  *
31  *  The Gianfar Ethernet Controller uses a ring of buffer
32  *  descriptors.  The beginning is indicated by a register
33  *  pointing to the physical address of the start of the ring.
34  *  The end is determined by a "wrap" bit being set in the
35  *  last descriptor of the ring.
36  *
37  *  When a packet is received, the RXF bit in the
38  *  IEVENT register is set, triggering an interrupt when the
39  *  corresponding bit in the IMASK register is also set (if
40  *  interrupt coalescing is active, then the interrupt may not
41  *  happen immediately, but will wait until either a set number
42  *  of frames or amount of time have passed).  In NAPI, the
43  *  interrupt handler will signal there is work to be done, and
44  *  exit. This method will start at the last known empty
45  *  descriptor, and process every subsequent descriptor until there
46  *  are none left with data (NAPI will stop after a set number of
47  *  packets to give time to other tasks, but will eventually
48  *  process all the packets).  The data arrives inside a
49  *  pre-allocated skb, and so after the skb is passed up to the
50  *  stack, a new skb must be allocated, and the address field in
51  *  the buffer descriptor must be updated to indicate this new
52  *  skb.
53  *
54  *  When the kernel requests that a packet be transmitted, the
55  *  driver starts where it left off last time, and points the
56  *  descriptor at the buffer which was passed in.  The driver
57  *  then informs the DMA engine that there are packets ready to
58  *  be transmitted.  Once the controller is finished transmitting
59  *  the packet, an interrupt may be triggered (under the same
60  *  conditions as for reception, but depending on the TXF bit).
61  *  The driver then cleans up the buffer.
62  */
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 #define DEBUG
66 
67 #include <linux/kernel.h>
68 #include <linux/string.h>
69 #include <linux/errno.h>
70 #include <linux/unistd.h>
71 #include <linux/slab.h>
72 #include <linux/interrupt.h>
73 #include <linux/delay.h>
74 #include <linux/netdevice.h>
75 #include <linux/etherdevice.h>
76 #include <linux/skbuff.h>
77 #include <linux/if_vlan.h>
78 #include <linux/spinlock.h>
79 #include <linux/mm.h>
80 #include <linux/of_address.h>
81 #include <linux/of_irq.h>
82 #include <linux/of_mdio.h>
83 #include <linux/of_platform.h>
84 #include <linux/ip.h>
85 #include <linux/tcp.h>
86 #include <linux/udp.h>
87 #include <linux/in.h>
88 #include <linux/net_tstamp.h>
89 
90 #include <asm/io.h>
91 #ifdef CONFIG_PPC
92 #include <asm/reg.h>
93 #include <asm/mpc85xx.h>
94 #endif
95 #include <asm/irq.h>
96 #include <asm/uaccess.h>
97 #include <linux/module.h>
98 #include <linux/dma-mapping.h>
99 #include <linux/crc32.h>
100 #include <linux/mii.h>
101 #include <linux/phy.h>
102 #include <linux/phy_fixed.h>
103 #include <linux/of.h>
104 #include <linux/of_net.h>
107 
108 #include "gianfar.h"
109 
110 #define TX_TIMEOUT      (1*HZ)
111 
112 const char gfar_driver_version[] = "1.3";
113 
114 static int gfar_enet_open(struct net_device *dev);
115 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
116 static void gfar_reset_task(struct work_struct *work);
117 static void gfar_timeout(struct net_device *dev);
118 static int gfar_close(struct net_device *dev);
119 struct sk_buff *gfar_new_skb(struct net_device *dev);
120 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
121 			   struct sk_buff *skb);
122 static int gfar_set_mac_address(struct net_device *dev);
123 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
124 static irqreturn_t gfar_error(int irq, void *dev_id);
125 static irqreturn_t gfar_transmit(int irq, void *dev_id);
126 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
127 static void adjust_link(struct net_device *dev);
128 static noinline void gfar_update_link_state(struct gfar_private *priv);
129 static int init_phy(struct net_device *dev);
130 static int gfar_probe(struct platform_device *ofdev);
131 static int gfar_remove(struct platform_device *ofdev);
132 static void free_skb_resources(struct gfar_private *priv);
133 static void gfar_set_multi(struct net_device *dev);
134 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
135 static void gfar_configure_serdes(struct net_device *dev);
136 static int gfar_poll_rx(struct napi_struct *napi, int budget);
137 static int gfar_poll_tx(struct napi_struct *napi, int budget);
138 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
139 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
140 #ifdef CONFIG_NET_POLL_CONTROLLER
141 static void gfar_netpoll(struct net_device *dev);
142 #endif
143 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
144 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
145 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
146 			       int amount_pull, struct napi_struct *napi);
147 static void gfar_halt_nodisable(struct gfar_private *priv);
148 static void gfar_clear_exact_match(struct net_device *dev);
149 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
150 				  const u8 *addr);
151 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
152 
153 MODULE_AUTHOR("Freescale Semiconductor, Inc");
154 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
155 MODULE_LICENSE("GPL");
156 
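/* Initialize a single Rx buffer descriptor: point it at the given DMA
 * buffer, mark it empty and interrupt-generating, and set the wrap bit
 * if it is the last descriptor in the ring.
 */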
157 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
158 			    dma_addr_t buf)
159 {
160 	u32 lstatus;
161 
162 	bdp->bufPtr = buf;
163 
164 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
165 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
166 		lstatus |= BD_LFLAG(RXBD_WRAP);
167 
168 	gfar_wmb();
169 
170 	bdp->lstatus = lstatus;
171 }
172 
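/* (Re)initialize the Tx and Rx buffer descriptor rings: reset the Tx
 * ring bookkeeping and clear every Tx descriptor, then refill the Rx
 * ring, allocating fresh skbs for any Rx slots that don't have one.
 */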
173 static int gfar_init_bds(struct net_device *ndev)
174 {
175 	struct gfar_private *priv = netdev_priv(ndev);
176 	struct gfar_priv_tx_q *tx_queue = NULL;
177 	struct gfar_priv_rx_q *rx_queue = NULL;
178 	struct txbd8 *txbdp;
179 	struct rxbd8 *rxbdp;
180 	int i, j;
181 
182 	for (i = 0; i < priv->num_tx_queues; i++) {
183 		tx_queue = priv->tx_queue[i];
184 		/* Initialize some variables in our dev structure */
185 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
186 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
187 		tx_queue->cur_tx = tx_queue->tx_bd_base;
188 		tx_queue->skb_curtx = 0;
189 		tx_queue->skb_dirtytx = 0;
190 
191 		/* Initialize Transmit Descriptor Ring */
192 		txbdp = tx_queue->tx_bd_base;
193 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
194 			txbdp->lstatus = 0;
195 			txbdp->bufPtr = 0;
196 			txbdp++;
197 		}
198 
199 		/* Set the last descriptor in the ring to indicate wrap */
200 		txbdp--;
201 		txbdp->status |= TXBD_WRAP;
202 	}
203 
204 	for (i = 0; i < priv->num_rx_queues; i++) {
205 		rx_queue = priv->rx_queue[i];
206 		rx_queue->cur_rx = rx_queue->rx_bd_base;
207 		rx_queue->skb_currx = 0;
208 		rxbdp = rx_queue->rx_bd_base;
209 
210 		for (j = 0; j < rx_queue->rx_ring_size; j++) {
211 			struct sk_buff *skb = rx_queue->rx_skbuff[j];
212 
213 			if (skb) {
214 				gfar_init_rxbdp(rx_queue, rxbdp,
215 						rxbdp->bufPtr);
216 			} else {
217 				skb = gfar_new_skb(ndev);
218 				if (!skb) {
219 					netdev_err(ndev, "Can't allocate RX buffers\n");
220 					return -ENOMEM;
221 				}
222 				rx_queue->rx_skbuff[j] = skb;
223 
224 				gfar_new_rxbdp(rx_queue, rxbdp, skb);
225 			}
226 
227 			rxbdp++;
228 		}
229 
230 	}
231 
232 	return 0;
233 }
234 
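/* Allocate one DMA-coherent region for all Tx and Rx descriptor rings,
 * carve it up between the queues, allocate the per-queue skb pointer
 * arrays, and initialize the rings via gfar_init_bds().
 */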
235 static int gfar_alloc_skb_resources(struct net_device *ndev)
236 {
237 	void *vaddr;
238 	dma_addr_t addr;
239 	int i, j, k;
240 	struct gfar_private *priv = netdev_priv(ndev);
241 	struct device *dev = priv->dev;
242 	struct gfar_priv_tx_q *tx_queue = NULL;
243 	struct gfar_priv_rx_q *rx_queue = NULL;
244 
245 	priv->total_tx_ring_size = 0;
246 	for (i = 0; i < priv->num_tx_queues; i++)
247 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
248 
249 	priv->total_rx_ring_size = 0;
250 	for (i = 0; i < priv->num_rx_queues; i++)
251 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
252 
253 	/* Allocate memory for the buffer descriptors */
254 	vaddr = dma_alloc_coherent(dev,
255 				   (priv->total_tx_ring_size *
256 				    sizeof(struct txbd8)) +
257 				   (priv->total_rx_ring_size *
258 				    sizeof(struct rxbd8)),
259 				   &addr, GFP_KERNEL);
260 	if (!vaddr)
261 		return -ENOMEM;
262 
263 	for (i = 0; i < priv->num_tx_queues; i++) {
264 		tx_queue = priv->tx_queue[i];
265 		tx_queue->tx_bd_base = vaddr;
266 		tx_queue->tx_bd_dma_base = addr;
267 		tx_queue->dev = ndev;
268 		/* enet DMA only understands physical addresses */
269 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
270 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
271 	}
272 
273 	/* Start the rx descriptor ring where the tx ring leaves off */
274 	for (i = 0; i < priv->num_rx_queues; i++) {
275 		rx_queue = priv->rx_queue[i];
276 		rx_queue->rx_bd_base = vaddr;
277 		rx_queue->rx_bd_dma_base = addr;
278 		rx_queue->dev = ndev;
279 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
280 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
281 	}
282 
283 	/* Setup the skbuff rings */
284 	for (i = 0; i < priv->num_tx_queues; i++) {
285 		tx_queue = priv->tx_queue[i];
286 		tx_queue->tx_skbuff =
287 			kmalloc_array(tx_queue->tx_ring_size,
288 				      sizeof(*tx_queue->tx_skbuff),
289 				      GFP_KERNEL);
290 		if (!tx_queue->tx_skbuff)
291 			goto cleanup;
292 
293 		for (k = 0; k < tx_queue->tx_ring_size; k++)
294 			tx_queue->tx_skbuff[k] = NULL;
295 	}
296 
297 	for (i = 0; i < priv->num_rx_queues; i++) {
298 		rx_queue = priv->rx_queue[i];
299 		rx_queue->rx_skbuff =
300 			kmalloc_array(rx_queue->rx_ring_size,
301 				      sizeof(*rx_queue->rx_skbuff),
302 				      GFP_KERNEL);
303 		if (!rx_queue->rx_skbuff)
304 			goto cleanup;
305 
306 		for (j = 0; j < rx_queue->rx_ring_size; j++)
307 			rx_queue->rx_skbuff[j] = NULL;
308 	}
309 
310 	if (gfar_init_bds(ndev))
311 		goto cleanup;
312 
313 	return 0;
314 
315 cleanup:
316 	free_skb_resources(priv);
317 	return -ENOMEM;
318 }
319 
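/* Program the TBASEn/RBASEn registers with the DMA base address of
 * each Tx and Rx buffer descriptor ring.
 */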
320 static void gfar_init_tx_rx_base(struct gfar_private *priv)
321 {
322 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
323 	u32 __iomem *baddr;
324 	int i;
325 
326 	baddr = &regs->tbase0;
327 	for (i = 0; i < priv->num_tx_queues; i++) {
328 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
329 		baddr += 2;
330 	}
331 
332 	baddr = &regs->rbase0;
333 	for (i = 0; i < priv->num_rx_queues; i++) {
334 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
335 		baddr += 2;
336 	}
337 }
338 
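/* Compute the Rx buffer size from the MTU, adding room for the frame
 * control block (when Rx offloads or Rx timestamping are enabled) and
 * any alignment padding, and round the result up to an
 * INCREMENTAL_BUFFER_SIZE boundary.
 */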
339 static void gfar_rx_buff_size_config(struct gfar_private *priv)
340 {
341 	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
342 
343 	/* set this when rx hw offload (TOE) functions are being used */
344 	priv->uses_rxfcb = 0;
345 
346 	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
347 		priv->uses_rxfcb = 1;
348 
349 	if (priv->hwts_rx_en)
350 		priv->uses_rxfcb = 1;
351 
352 	if (priv->uses_rxfcb)
353 		frame_size += GMAC_FCB_LEN;
354 
355 	frame_size += priv->padding;
356 
357 	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
358 		     INCREMENTAL_BUFFER_SIZE;
359 
360 	priv->rx_buffer_size = frame_size;
361 }
362 
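/* Build and write the RCTRL register from the current settings: filer,
 * promiscuous mode, Rx checksumming, extended hash, padding, Rx
 * timestamping and VLAN extraction.
 */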
363 static void gfar_mac_rx_config(struct gfar_private *priv)
364 {
365 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
366 	u32 rctrl = 0;
367 
368 	if (priv->rx_filer_enable) {
369 		rctrl |= RCTRL_FILREN;
370 		/* Program the RIR0 reg with the required distribution */
371 		if (priv->poll_mode == GFAR_SQ_POLLING)
372 			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
373 		else /* GFAR_MQ_POLLING */
374 			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
375 	}
376 
377 	/* Restore PROMISC mode */
378 	if (priv->ndev->flags & IFF_PROMISC)
379 		rctrl |= RCTRL_PROM;
380 
381 	if (priv->ndev->features & NETIF_F_RXCSUM)
382 		rctrl |= RCTRL_CHECKSUMMING;
383 
384 	if (priv->extended_hash)
385 		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
386 
387 	if (priv->padding) {
388 		rctrl &= ~RCTRL_PAL_MASK;
389 		rctrl |= RCTRL_PADDING(priv->padding);
390 	}
391 
392 	/* Enable HW time stamping if requested from user space */
393 	if (priv->hwts_rx_en)
394 		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
395 
396 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
397 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
398 
399 	/* Init rctrl based on our settings */
400 	gfar_write(&regs->rctrl, rctrl);
401 }
402 
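/* Build and write the TCTRL register: Tx checksumming, priority vs.
 * weighted round-robin queue scheduling, and VLAN insertion.
 */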
403 static void gfar_mac_tx_config(struct gfar_private *priv)
404 {
405 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
406 	u32 tctrl = 0;
407 
408 	if (priv->ndev->features & NETIF_F_IP_CSUM)
409 		tctrl |= TCTRL_INIT_CSUM;
410 
411 	if (priv->prio_sched_en)
412 		tctrl |= TCTRL_TXSCHED_PRIO;
413 	else {
414 		tctrl |= TCTRL_TXSCHED_WRRS;
415 		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
416 		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
417 	}
418 
419 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
420 		tctrl |= TCTRL_VLINS;
421 
422 	gfar_write(&regs->tctrl, tctrl);
423 }
424 
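/* Program the Tx/Rx interrupt coalescing registers for the queues
 * selected by tx_mask/rx_mask.  In multi-group (MQ_MG_MODE) operation
 * each queue has its own TXICn/RXICn register; otherwise only the
 * single txic/rxic pair is programmed.
 */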
425 static void gfar_configure_coalescing(struct gfar_private *priv,
426 			       unsigned long tx_mask, unsigned long rx_mask)
427 {
428 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
429 	u32 __iomem *baddr;
430 
431 	if (priv->mode == MQ_MG_MODE) {
432 		int i = 0;
433 
434 		baddr = &regs->txic0;
435 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
436 			gfar_write(baddr + i, 0);
437 			if (likely(priv->tx_queue[i]->txcoalescing))
438 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
439 		}
440 
441 		baddr = &regs->rxic0;
442 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
443 			gfar_write(baddr + i, 0);
444 			if (likely(priv->rx_queue[i]->rxcoalescing))
445 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
446 		}
447 	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single reg to program
		 */
451 		gfar_write(&regs->txic, 0);
452 		if (likely(priv->tx_queue[0]->txcoalescing))
453 			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
454 
455 		gfar_write(&regs->rxic, 0);
456 		if (unlikely(priv->rx_queue[0]->rxcoalescing))
457 			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
458 	}
459 }
460 
461 void gfar_configure_coalescing_all(struct gfar_private *priv)
462 {
463 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
464 }
465 
466 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
467 {
468 	struct gfar_private *priv = netdev_priv(dev);
469 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
470 	unsigned long tx_packets = 0, tx_bytes = 0;
471 	int i;
472 
473 	for (i = 0; i < priv->num_rx_queues; i++) {
474 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
475 		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
476 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
477 	}
478 
479 	dev->stats.rx_packets = rx_packets;
480 	dev->stats.rx_bytes   = rx_bytes;
481 	dev->stats.rx_dropped = rx_dropped;
482 
483 	for (i = 0; i < priv->num_tx_queues; i++) {
484 		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
485 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
486 	}
487 
488 	dev->stats.tx_bytes   = tx_bytes;
489 	dev->stats.tx_packets = tx_packets;
490 
491 	return &dev->stats;
492 }
493 
494 static const struct net_device_ops gfar_netdev_ops = {
495 	.ndo_open = gfar_enet_open,
496 	.ndo_start_xmit = gfar_start_xmit,
497 	.ndo_stop = gfar_close,
498 	.ndo_change_mtu = gfar_change_mtu,
499 	.ndo_set_features = gfar_set_features,
500 	.ndo_set_rx_mode = gfar_set_multi,
501 	.ndo_tx_timeout = gfar_timeout,
502 	.ndo_do_ioctl = gfar_ioctl,
503 	.ndo_get_stats = gfar_get_stats,
504 	.ndo_set_mac_address = eth_mac_addr,
505 	.ndo_validate_addr = eth_validate_addr,
506 #ifdef CONFIG_NET_POLL_CONTROLLER
507 	.ndo_poll_controller = gfar_netpoll,
508 #endif
509 };
510 
511 static void gfar_ints_disable(struct gfar_private *priv)
512 {
513 	int i;
514 	for (i = 0; i < priv->num_grps; i++) {
515 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
516 		/* Clear IEVENT */
517 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
518 
519 		/* Initialize IMASK */
520 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
521 	}
522 }
523 
524 static void gfar_ints_enable(struct gfar_private *priv)
525 {
526 	int i;
527 	for (i = 0; i < priv->num_grps; i++) {
528 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
529 		/* Unmask the interrupts we look for */
530 		gfar_write(&regs->imask, IMASK_DEFAULT);
531 	}
532 }
533 
534 void lock_tx_qs(struct gfar_private *priv)
535 {
536 	int i;
537 
538 	for (i = 0; i < priv->num_tx_queues; i++)
539 		spin_lock(&priv->tx_queue[i]->txlock);
540 }
541 
542 void unlock_tx_qs(struct gfar_private *priv)
543 {
544 	int i;
545 
546 	for (i = 0; i < priv->num_tx_queues; i++)
547 		spin_unlock(&priv->tx_queue[i]->txlock);
548 }
549 
550 static int gfar_alloc_tx_queues(struct gfar_private *priv)
551 {
552 	int i;
553 
554 	for (i = 0; i < priv->num_tx_queues; i++) {
555 		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
556 					    GFP_KERNEL);
557 		if (!priv->tx_queue[i])
558 			return -ENOMEM;
559 
560 		priv->tx_queue[i]->tx_skbuff = NULL;
561 		priv->tx_queue[i]->qindex = i;
562 		priv->tx_queue[i]->dev = priv->ndev;
563 		spin_lock_init(&(priv->tx_queue[i]->txlock));
564 	}
565 	return 0;
566 }
567 
568 static int gfar_alloc_rx_queues(struct gfar_private *priv)
569 {
570 	int i;
571 
572 	for (i = 0; i < priv->num_rx_queues; i++) {
573 		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
574 					    GFP_KERNEL);
575 		if (!priv->rx_queue[i])
576 			return -ENOMEM;
577 
578 		priv->rx_queue[i]->rx_skbuff = NULL;
579 		priv->rx_queue[i]->qindex = i;
580 		priv->rx_queue[i]->dev = priv->ndev;
581 	}
582 	return 0;
583 }
584 
585 static void gfar_free_tx_queues(struct gfar_private *priv)
586 {
587 	int i;
588 
589 	for (i = 0; i < priv->num_tx_queues; i++)
590 		kfree(priv->tx_queue[i]);
591 }
592 
593 static void gfar_free_rx_queues(struct gfar_private *priv)
594 {
595 	int i;
596 
597 	for (i = 0; i < priv->num_rx_queues; i++)
598 		kfree(priv->rx_queue[i]);
599 }
600 
601 static void unmap_group_regs(struct gfar_private *priv)
602 {
603 	int i;
604 
605 	for (i = 0; i < MAXGROUPS; i++)
606 		if (priv->gfargrp[i].regs)
607 			iounmap(priv->gfargrp[i].regs);
608 }
609 
610 static void free_gfar_dev(struct gfar_private *priv)
611 {
612 	int i, j;
613 
614 	for (i = 0; i < priv->num_grps; i++)
615 		for (j = 0; j < GFAR_NUM_IRQS; j++) {
616 			kfree(priv->gfargrp[i].irqinfo[j]);
617 			priv->gfargrp[i].irqinfo[j] = NULL;
618 		}
619 
620 	free_netdev(priv->ndev);
621 }
622 
623 static void disable_napi(struct gfar_private *priv)
624 {
625 	int i;
626 
627 	for (i = 0; i < priv->num_grps; i++) {
628 		napi_disable(&priv->gfargrp[i].napi_rx);
629 		napi_disable(&priv->gfargrp[i].napi_tx);
630 	}
631 }
632 
633 static void enable_napi(struct gfar_private *priv)
634 {
635 	int i;
636 
637 	for (i = 0; i < priv->num_grps; i++) {
638 		napi_enable(&priv->gfargrp[i].napi_rx);
639 		napi_enable(&priv->gfargrp[i].napi_tx);
640 	}
641 }
642 
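/* Parse one interrupt group node from the device tree: allocate the
 * IRQ info, map the register block, fetch the interrupt lines, and
 * derive the Rx/Tx queue bitmaps that tie queues to this group.
 */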
643 static int gfar_parse_group(struct device_node *np,
644 			    struct gfar_private *priv, const char *model)
645 {
646 	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
647 	int i;
648 
649 	for (i = 0; i < GFAR_NUM_IRQS; i++) {
650 		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
651 					  GFP_KERNEL);
652 		if (!grp->irqinfo[i])
653 			return -ENOMEM;
654 	}
655 
656 	grp->regs = of_iomap(np, 0);
657 	if (!grp->regs)
658 		return -ENOMEM;
659 
660 	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
661 
662 	/* If we aren't the FEC we have multiple interrupts */
663 	if (model && strcasecmp(model, "FEC")) {
664 		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
665 		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
666 		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
667 		    gfar_irq(grp, RX)->irq == NO_IRQ ||
668 		    gfar_irq(grp, ER)->irq == NO_IRQ)
669 			return -EINVAL;
670 	}
671 
672 	grp->priv = priv;
673 	spin_lock_init(&grp->grplock);
674 	if (priv->mode == MQ_MG_MODE) {
675 		u32 *rxq_mask, *txq_mask;
676 		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
677 		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
678 
679 		if (priv->poll_mode == GFAR_SQ_POLLING) {
680 			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
681 			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
682 			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
683 		} else { /* GFAR_MQ_POLLING */
684 			grp->rx_bit_map = rxq_mask ?
685 			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
686 			grp->tx_bit_map = txq_mask ?
687 			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
688 		}
689 	} else {
690 		grp->rx_bit_map = 0xFF;
691 		grp->tx_bit_map = 0xFF;
692 	}
693 
	/* The bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit
	 * parses from right to left, so we need to reverse the 8 bits to
	 * get the q index
	 */
697 	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
698 	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
699 
700 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
701 	 * also assign queues to groups
702 	 */
703 	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
704 		if (!grp->rx_queue)
705 			grp->rx_queue = priv->rx_queue[i];
706 		grp->num_rx_queues++;
707 		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
708 		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
709 		priv->rx_queue[i]->grp = grp;
710 	}
711 
712 	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
713 		if (!grp->tx_queue)
714 			grp->tx_queue = priv->tx_queue[i];
715 		grp->num_tx_queues++;
716 		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
717 		priv->tqueue |= (TQUEUE_EN0 >> i);
718 		priv->tx_queue[i]->grp = grp;
719 	}
720 
721 	priv->num_grps++;
722 
723 	return 0;
724 }
725 
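/* Parse the controller's device tree node: determine the operating and
 * polling modes and the number of Tx/Rx queues, allocate the net_device
 * and queue structures, parse the interrupt groups, and read the
 * stashing, MAC address and PHY properties.
 */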
726 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
727 {
728 	const char *model;
729 	const char *ctype;
730 	const void *mac_addr;
731 	int err = 0, i;
732 	struct net_device *dev = NULL;
733 	struct gfar_private *priv = NULL;
734 	struct device_node *np = ofdev->dev.of_node;
735 	struct device_node *child = NULL;
736 	const u32 *stash;
737 	const u32 *stash_len;
738 	const u32 *stash_idx;
739 	unsigned int num_tx_qs, num_rx_qs;
740 	u32 *tx_queues, *rx_queues;
741 	unsigned short mode, poll_mode;
742 
743 	if (!np || !of_device_is_available(np))
744 		return -ENODEV;
745 
746 	if (of_device_is_compatible(np, "fsl,etsec2")) {
747 		mode = MQ_MG_MODE;
748 		poll_mode = GFAR_SQ_POLLING;
749 	} else {
750 		mode = SQ_SG_MODE;
751 		poll_mode = GFAR_SQ_POLLING;
752 	}
753 
754 	/* parse the num of HW tx and rx queues */
755 	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
756 	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
757 
758 	if (mode == SQ_SG_MODE) {
759 		num_tx_qs = 1;
760 		num_rx_qs = 1;
761 	} else { /* MQ_MG_MODE */
762 		/* get the actual number of supported groups */
763 		unsigned int num_grps = of_get_available_child_count(np);
764 
765 		if (num_grps == 0 || num_grps > MAXGROUPS) {
766 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
767 				num_grps);
768 			pr_err("Cannot do alloc_etherdev, aborting\n");
769 			return -EINVAL;
770 		}
771 
772 		if (poll_mode == GFAR_SQ_POLLING) {
773 			num_tx_qs = num_grps; /* one txq per int group */
774 			num_rx_qs = num_grps; /* one rxq per int group */
775 		} else { /* GFAR_MQ_POLLING */
776 			num_tx_qs = tx_queues ? *tx_queues : 1;
777 			num_rx_qs = rx_queues ? *rx_queues : 1;
778 		}
779 	}
780 
781 	if (num_tx_qs > MAX_TX_QS) {
782 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
783 		       num_tx_qs, MAX_TX_QS);
784 		pr_err("Cannot do alloc_etherdev, aborting\n");
785 		return -EINVAL;
786 	}
787 
788 	if (num_rx_qs > MAX_RX_QS) {
789 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
790 		       num_rx_qs, MAX_RX_QS);
791 		pr_err("Cannot do alloc_etherdev, aborting\n");
792 		return -EINVAL;
793 	}
794 
795 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
796 	dev = *pdev;
797 	if (NULL == dev)
798 		return -ENOMEM;
799 
800 	priv = netdev_priv(dev);
801 	priv->ndev = dev;
802 
803 	priv->mode = mode;
804 	priv->poll_mode = poll_mode;
805 
806 	priv->num_tx_queues = num_tx_qs;
807 	netif_set_real_num_rx_queues(dev, num_rx_qs);
808 	priv->num_rx_queues = num_rx_qs;
809 
810 	err = gfar_alloc_tx_queues(priv);
811 	if (err)
812 		goto tx_alloc_failed;
813 
814 	err = gfar_alloc_rx_queues(priv);
815 	if (err)
816 		goto rx_alloc_failed;
817 
818 	/* Init Rx queue filer rule set linked list */
819 	INIT_LIST_HEAD(&priv->rx_list.list);
820 	priv->rx_list.count = 0;
821 	mutex_init(&priv->rx_queue_access);
822 
823 	model = of_get_property(np, "model", NULL);
824 
825 	for (i = 0; i < MAXGROUPS; i++)
826 		priv->gfargrp[i].regs = NULL;
827 
828 	/* Parse and initialize group specific information */
829 	if (priv->mode == MQ_MG_MODE) {
830 		for_each_child_of_node(np, child) {
831 			err = gfar_parse_group(child, priv, model);
832 			if (err)
833 				goto err_grp_init;
834 		}
835 	} else { /* SQ_SG_MODE */
836 		err = gfar_parse_group(np, priv, model);
837 		if (err)
838 			goto err_grp_init;
839 	}
840 
841 	stash = of_get_property(np, "bd-stash", NULL);
842 
843 	if (stash) {
844 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
845 		priv->bd_stash_en = 1;
846 	}
847 
848 	stash_len = of_get_property(np, "rx-stash-len", NULL);
849 
850 	if (stash_len)
851 		priv->rx_stash_size = *stash_len;
852 
853 	stash_idx = of_get_property(np, "rx-stash-idx", NULL);
854 
855 	if (stash_idx)
856 		priv->rx_stash_index = *stash_idx;
857 
858 	if (stash_len || stash_idx)
859 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
860 
861 	mac_addr = of_get_mac_address(np);
862 
863 	if (mac_addr)
864 		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
865 
866 	if (model && !strcasecmp(model, "TSEC"))
867 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
868 				     FSL_GIANFAR_DEV_HAS_COALESCE |
869 				     FSL_GIANFAR_DEV_HAS_RMON |
870 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
871 
872 	if (model && !strcasecmp(model, "eTSEC"))
873 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
874 				     FSL_GIANFAR_DEV_HAS_COALESCE |
875 				     FSL_GIANFAR_DEV_HAS_RMON |
876 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
877 				     FSL_GIANFAR_DEV_HAS_CSUM |
878 				     FSL_GIANFAR_DEV_HAS_VLAN |
879 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
880 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
881 				     FSL_GIANFAR_DEV_HAS_TIMER;
882 
883 	ctype = of_get_property(np, "phy-connection-type", NULL);
884 
885 	/* We only care about rgmii-id.  The rest are autodetected */
886 	if (ctype && !strcmp(ctype, "rgmii-id"))
887 		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
888 	else
889 		priv->interface = PHY_INTERFACE_MODE_MII;
890 
891 	if (of_get_property(np, "fsl,magic-packet", NULL))
892 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
893 
894 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
895 
896 	/* In the case of a fixed PHY, the DT node associated
	 * with the PHY is the Ethernet MAC DT node.
898 	 */
899 	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
900 		err = of_phy_register_fixed_link(np);
901 		if (err)
902 			goto err_grp_init;
903 
904 		priv->phy_node = of_node_get(np);
905 	}
906 
907 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
908 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
909 
910 	return 0;
911 
912 err_grp_init:
913 	unmap_group_regs(priv);
914 rx_alloc_failed:
915 	gfar_free_rx_queues(priv);
916 tx_alloc_failed:
917 	gfar_free_tx_queues(priv);
918 	free_gfar_dev(priv);
919 	return err;
920 }
921 
922 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
923 {
924 	struct hwtstamp_config config;
925 	struct gfar_private *priv = netdev_priv(netdev);
926 
927 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
928 		return -EFAULT;
929 
930 	/* reserved for future extensions */
931 	if (config.flags)
932 		return -EINVAL;
933 
934 	switch (config.tx_type) {
935 	case HWTSTAMP_TX_OFF:
936 		priv->hwts_tx_en = 0;
937 		break;
938 	case HWTSTAMP_TX_ON:
939 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
940 			return -ERANGE;
941 		priv->hwts_tx_en = 1;
942 		break;
943 	default:
944 		return -ERANGE;
945 	}
946 
947 	switch (config.rx_filter) {
948 	case HWTSTAMP_FILTER_NONE:
949 		if (priv->hwts_rx_en) {
950 			priv->hwts_rx_en = 0;
951 			reset_gfar(netdev);
952 		}
953 		break;
954 	default:
955 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
956 			return -ERANGE;
957 		if (!priv->hwts_rx_en) {
958 			priv->hwts_rx_en = 1;
959 			reset_gfar(netdev);
960 		}
961 		config.rx_filter = HWTSTAMP_FILTER_ALL;
962 		break;
963 	}
964 
965 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
966 		-EFAULT : 0;
967 }
968 
969 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
970 {
971 	struct hwtstamp_config config;
972 	struct gfar_private *priv = netdev_priv(netdev);
973 
974 	config.flags = 0;
975 	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
976 	config.rx_filter = (priv->hwts_rx_en ?
977 			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
978 
979 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
980 		-EFAULT : 0;
981 }
982 
983 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
984 {
985 	struct gfar_private *priv = netdev_priv(dev);
986 
987 	if (!netif_running(dev))
988 		return -EINVAL;
989 
990 	if (cmd == SIOCSHWTSTAMP)
991 		return gfar_hwtstamp_set(dev, rq);
992 	if (cmd == SIOCGHWTSTAMP)
993 		return gfar_hwtstamp_get(dev, rq);
994 
995 	if (!priv->phydev)
996 		return -ENODEV;
997 
998 	return phy_mii_ioctl(priv->phydev, rq, cmd);
999 }
1000 
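/* Write a four-entry filer rule cluster matching the given parse-result
 * class, working backwards from rqfar; returns the index of the last
 * entry written.
 */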
1001 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
1002 				   u32 class)
1003 {
1004 	u32 rqfpr = FPR_FILER_MASK;
1005 	u32 rqfcr = 0x0;
1006 
1007 	rqfar--;
1008 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
1009 	priv->ftp_rqfpr[rqfar] = rqfpr;
1010 	priv->ftp_rqfcr[rqfar] = rqfcr;
1011 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1012 
1013 	rqfar--;
1014 	rqfcr = RQFCR_CMP_NOMATCH;
1015 	priv->ftp_rqfpr[rqfar] = rqfpr;
1016 	priv->ftp_rqfcr[rqfar] = rqfcr;
1017 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1018 
1019 	rqfar--;
1020 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1021 	rqfpr = class;
1022 	priv->ftp_rqfcr[rqfar] = rqfcr;
1023 	priv->ftp_rqfpr[rqfar] = rqfpr;
1024 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1025 
1026 	rqfar--;
1027 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1028 	rqfpr = class;
1029 	priv->ftp_rqfcr[rqfar] = rqfcr;
1030 	priv->ftp_rqfpr[rqfar] = rqfpr;
1031 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1032 
1033 	return rqfar;
1034 }
1035 
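/* Set up the default Rx filer table: a default match-all rule, one rule
 * cluster per IPv4/IPv6 TCP/UDP class, and no-match entries for the
 * remaining slots.
 */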
1036 static void gfar_init_filer_table(struct gfar_private *priv)
1037 {
1038 	int i = 0x0;
1039 	u32 rqfar = MAX_FILER_IDX;
1040 	u32 rqfcr = 0x0;
1041 	u32 rqfpr = FPR_FILER_MASK;
1042 
1043 	/* Default rule */
1044 	rqfcr = RQFCR_CMP_MATCH;
1045 	priv->ftp_rqfcr[rqfar] = rqfcr;
1046 	priv->ftp_rqfpr[rqfar] = rqfpr;
1047 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1048 
1049 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1050 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1051 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1052 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1053 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1054 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1055 
	/* cur_filer_idx indicates the first non-masked rule */
1057 	priv->cur_filer_idx = rqfar;
1058 
1059 	/* Rest are masked rules */
1060 	rqfcr = RQFCR_CMP_NOMATCH;
1061 	for (i = 0; i < rqfar; i++) {
1062 		priv->ftp_rqfcr[i] = rqfcr;
1063 		priv->ftp_rqfpr[i] = rqfpr;
1064 		gfar_write_filer(priv, i, rqfcr, rqfpr);
1065 	}
1066 }
1067 
1068 #ifdef CONFIG_PPC
1069 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1070 {
1071 	unsigned int pvr = mfspr(SPRN_PVR);
1072 	unsigned int svr = mfspr(SPRN_SVR);
1073 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1074 	unsigned int rev = svr & 0xffff;
1075 
1076 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
1077 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1078 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1079 		priv->errata |= GFAR_ERRATA_74;
1080 
1081 	/* MPC8313 and MPC837x all rev */
1082 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
1083 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1084 		priv->errata |= GFAR_ERRATA_76;
1085 
1086 	/* MPC8313 Rev < 2.0 */
1087 	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1088 		priv->errata |= GFAR_ERRATA_12;
1089 }
1090 
1091 static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1092 {
1093 	unsigned int svr = mfspr(SPRN_SVR);
1094 
1095 	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1096 		priv->errata |= GFAR_ERRATA_12;
1097 	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1098 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
1099 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1100 }
1101 #endif
1102 
1103 static void gfar_detect_errata(struct gfar_private *priv)
1104 {
1105 	struct device *dev = &priv->ofdev->dev;
1106 
1107 	/* no plans to fix */
1108 	priv->errata |= GFAR_ERRATA_A002;
1109 
1110 #ifdef CONFIG_PPC
1111 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1112 		__gfar_detect_errata_85xx(priv);
1113 	else /* non-mpc85xx parts, i.e. e300 core based */
1114 		__gfar_detect_errata_83xx(priv);
1115 #endif
1116 
1117 	if (priv->errata)
1118 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1119 			 priv->errata);
1120 }
1121 
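/* Soft-reset the MAC and reprogram it: frame length limits, MACCFG2,
 * the hash registers, Rx/Tx control, station address, multicast
 * filtering and interrupt coalescing.
 */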
1122 void gfar_mac_reset(struct gfar_private *priv)
1123 {
1124 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1125 	u32 tempval;
1126 
1127 	/* Reset MAC layer */
1128 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1129 
1130 	/* We need to delay at least 3 TX clocks */
1131 	udelay(3);
1132 
1133 	/* the soft reset bit is not self-resetting, so we need to
1134 	 * clear it before resuming normal operation
1135 	 */
1136 	gfar_write(&regs->maccfg1, 0);
1137 
1138 	udelay(3);
1139 
1140 	/* Compute rx_buff_size based on config flags */
1141 	gfar_rx_buff_size_config(priv);
1142 
1143 	/* Initialize the max receive frame/buffer lengths */
1144 	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1145 	gfar_write(&regs->mrblr, priv->rx_buffer_size);
1146 
1147 	/* Initialize the Minimum Frame Length Register */
1148 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1149 
1150 	/* Initialize MACCFG2. */
1151 	tempval = MACCFG2_INIT_SETTINGS;
1152 
1153 	/* If the mtu is larger than the max size for standard
1154 	 * ethernet frames (ie, a jumbo frame), then set maccfg2
1155 	 * to allow huge frames, and to check the length
1156 	 */
1157 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1158 	    gfar_has_errata(priv, GFAR_ERRATA_74))
1159 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1160 
1161 	gfar_write(&regs->maccfg2, tempval);
1162 
1163 	/* Clear mac addr hash registers */
1164 	gfar_write(&regs->igaddr0, 0);
1165 	gfar_write(&regs->igaddr1, 0);
1166 	gfar_write(&regs->igaddr2, 0);
1167 	gfar_write(&regs->igaddr3, 0);
1168 	gfar_write(&regs->igaddr4, 0);
1169 	gfar_write(&regs->igaddr5, 0);
1170 	gfar_write(&regs->igaddr6, 0);
1171 	gfar_write(&regs->igaddr7, 0);
1172 
1173 	gfar_write(&regs->gaddr0, 0);
1174 	gfar_write(&regs->gaddr1, 0);
1175 	gfar_write(&regs->gaddr2, 0);
1176 	gfar_write(&regs->gaddr3, 0);
1177 	gfar_write(&regs->gaddr4, 0);
1178 	gfar_write(&regs->gaddr5, 0);
1179 	gfar_write(&regs->gaddr6, 0);
1180 	gfar_write(&regs->gaddr7, 0);
1181 
1182 	if (priv->extended_hash)
1183 		gfar_clear_exact_match(priv->ndev);
1184 
1185 	gfar_mac_rx_config(priv);
1186 
1187 	gfar_mac_tx_config(priv);
1188 
1189 	gfar_set_mac_address(priv->ndev);
1190 
1191 	gfar_set_multi(priv->ndev);
1192 
1193 	/* clear ievent and imask before configuring coalescing */
1194 	gfar_ints_disable(priv);
1195 
1196 	/* Configure the coalescing support */
1197 	gfar_configure_coalescing_all(priv);
1198 }
1199 
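/* One-time hardware init at probe time: halt the DMA, reset the MAC,
 * clear the RMON MIB counters if present, and program ECNTRL, the
 * stashing attributes, the FIFO thresholds and, for multi-group
 * devices, the interrupt steering registers.
 */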
1200 static void gfar_hw_init(struct gfar_private *priv)
1201 {
1202 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1203 	u32 attrs;
1204 
1205 	/* Stop the DMA engine now, in case it was running before
1206 	 * (The firmware could have used it, and left it running).
1207 	 */
1208 	gfar_halt(priv);
1209 
1210 	gfar_mac_reset(priv);
1211 
	/* Zero out the RMON MIB registers if the device has them */
1213 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1214 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1215 
1216 		/* Mask off the CAM interrupts */
1217 		gfar_write(&regs->rmon.cam1, 0xffffffff);
1218 		gfar_write(&regs->rmon.cam2, 0xffffffff);
1219 	}
1220 
1221 	/* Initialize ECNTRL */
1222 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1223 
1224 	/* Set the extraction length and index */
1225 	attrs = ATTRELI_EL(priv->rx_stash_size) |
1226 		ATTRELI_EI(priv->rx_stash_index);
1227 
1228 	gfar_write(&regs->attreli, attrs);
1229 
1230 	/* Start with defaults, and add stashing
1231 	 * depending on driver parameters
1232 	 */
1233 	attrs = ATTR_INIT_SETTINGS;
1234 
1235 	if (priv->bd_stash_en)
1236 		attrs |= ATTR_BDSTASH;
1237 
1238 	if (priv->rx_stash_size != 0)
1239 		attrs |= ATTR_BUFSTASH;
1240 
1241 	gfar_write(&regs->attr, attrs);
1242 
1243 	/* FIFO configs */
1244 	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1245 	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1246 	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1247 
1248 	/* Program the interrupt steering regs, only for MG devices */
1249 	if (priv->num_grps > 1)
1250 		gfar_write_isrg(priv);
1251 }
1252 
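/* Select the hash register set used for address filtering: devices with
 * extended hash use all 16 registers (8 igaddr + 8 gaddr, a 512-bin
 * hash), others use only the 8 gaddr registers (a 256-bin hash).
 */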
1253 static void gfar_init_addr_hash_table(struct gfar_private *priv)
1254 {
1255 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1256 
1257 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1258 		priv->extended_hash = 1;
1259 		priv->hash_width = 9;
1260 
1261 		priv->hash_regs[0] = &regs->igaddr0;
1262 		priv->hash_regs[1] = &regs->igaddr1;
1263 		priv->hash_regs[2] = &regs->igaddr2;
1264 		priv->hash_regs[3] = &regs->igaddr3;
1265 		priv->hash_regs[4] = &regs->igaddr4;
1266 		priv->hash_regs[5] = &regs->igaddr5;
1267 		priv->hash_regs[6] = &regs->igaddr6;
1268 		priv->hash_regs[7] = &regs->igaddr7;
1269 		priv->hash_regs[8] = &regs->gaddr0;
1270 		priv->hash_regs[9] = &regs->gaddr1;
1271 		priv->hash_regs[10] = &regs->gaddr2;
1272 		priv->hash_regs[11] = &regs->gaddr3;
1273 		priv->hash_regs[12] = &regs->gaddr4;
1274 		priv->hash_regs[13] = &regs->gaddr5;
1275 		priv->hash_regs[14] = &regs->gaddr6;
1276 		priv->hash_regs[15] = &regs->gaddr7;
1277 
1278 	} else {
1279 		priv->extended_hash = 0;
1280 		priv->hash_width = 8;
1281 
1282 		priv->hash_regs[0] = &regs->gaddr0;
1283 		priv->hash_regs[1] = &regs->gaddr1;
1284 		priv->hash_regs[2] = &regs->gaddr2;
1285 		priv->hash_regs[3] = &regs->gaddr3;
1286 		priv->hash_regs[4] = &regs->gaddr4;
1287 		priv->hash_regs[5] = &regs->gaddr5;
1288 		priv->hash_regs[6] = &regs->gaddr6;
1289 		priv->hash_regs[7] = &regs->gaddr7;
1290 	}
1291 }
1292 
1293 /* Set up the ethernet device structure, private data,
1294  * and anything else we need before we start
1295  */
1296 static int gfar_probe(struct platform_device *ofdev)
1297 {
1298 	struct net_device *dev = NULL;
1299 	struct gfar_private *priv = NULL;
1300 	int err = 0, i;
1301 
1302 	err = gfar_of_init(ofdev, &dev);
1303 
1304 	if (err)
1305 		return err;
1306 
1307 	priv = netdev_priv(dev);
1308 	priv->ndev = dev;
1309 	priv->ofdev = ofdev;
1310 	priv->dev = &ofdev->dev;
1311 	SET_NETDEV_DEV(dev, &ofdev->dev);
1312 
1313 	spin_lock_init(&priv->bflock);
1314 	INIT_WORK(&priv->reset_task, gfar_reset_task);
1315 
1316 	platform_set_drvdata(ofdev, priv);
1317 
1318 	gfar_detect_errata(priv);
1319 
1320 	/* Set the dev->base_addr to the gfar reg region */
1321 	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1322 
1323 	/* Fill in the dev structure */
1324 	dev->watchdog_timeo = TX_TIMEOUT;
1325 	dev->mtu = 1500;
1326 	dev->netdev_ops = &gfar_netdev_ops;
1327 	dev->ethtool_ops = &gfar_ethtool_ops;
1328 
	/* Register NAPI for each interrupt group */
1330 	for (i = 0; i < priv->num_grps; i++) {
1331 		if (priv->poll_mode == GFAR_SQ_POLLING) {
1332 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1333 				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1334 			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1335 				       gfar_poll_tx_sq, 2);
1336 		} else {
1337 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1338 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
1339 			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1340 				       gfar_poll_tx, 2);
1341 		}
1342 	}
1343 
1344 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1345 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1346 				   NETIF_F_RXCSUM;
1347 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1348 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1349 	}
1350 
1351 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1352 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1353 				    NETIF_F_HW_VLAN_CTAG_RX;
1354 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1355 	}
1356 
1357 	gfar_init_addr_hash_table(priv);
1358 
1359 	/* Insert receive time stamps into padding alignment bytes */
1360 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1361 		priv->padding = 8;
1362 
1363 	if (dev->features & NETIF_F_IP_CSUM ||
1364 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1365 		dev->needed_headroom = GMAC_FCB_LEN;
1366 
1367 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1368 
1369 	/* Initializing some of the rx/tx queue level parameters */
1370 	for (i = 0; i < priv->num_tx_queues; i++) {
1371 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1372 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1373 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1374 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
1375 	}
1376 
1377 	for (i = 0; i < priv->num_rx_queues; i++) {
1378 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1379 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1380 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1381 	}
1382 
1383 	/* always enable rx filer */
1384 	priv->rx_filer_enable = 1;
1385 	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
1388 	if (priv->num_tx_queues == 1)
1389 		priv->prio_sched_en = 1;
1390 
1391 	set_bit(GFAR_DOWN, &priv->state);
1392 
1393 	gfar_hw_init(priv);
1394 
1395 	/* Carrier starts down, phylib will bring it up */
1396 	netif_carrier_off(dev);
1397 
1398 	err = register_netdev(dev);
1399 
1400 	if (err) {
1401 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
1402 		goto register_fail;
1403 	}
1404 
1405 	device_init_wakeup(&dev->dev,
1406 			   priv->device_flags &
1407 			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1408 
1409 	/* fill out IRQ number and name fields */
1410 	for (i = 0; i < priv->num_grps; i++) {
1411 		struct gfar_priv_grp *grp = &priv->gfargrp[i];
1412 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1413 			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1414 				dev->name, "_g", '0' + i, "_tx");
1415 			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1416 				dev->name, "_g", '0' + i, "_rx");
1417 			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1418 				dev->name, "_g", '0' + i, "_er");
1419 		} else
1420 			strcpy(gfar_irq(grp, TX)->name, dev->name);
1421 	}
1422 
1423 	/* Initialize the filer table */
1424 	gfar_init_filer_table(priv);
1425 
1426 	/* Print out the device info */
1427 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1428 
1429 	/* Even more device info helps when determining which kernel
1430 	 * provided which set of benchmarks.
1431 	 */
1432 	netdev_info(dev, "Running with NAPI enabled\n");
1433 	for (i = 0; i < priv->num_rx_queues; i++)
1434 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1435 			    i, priv->rx_queue[i]->rx_ring_size);
1436 	for (i = 0; i < priv->num_tx_queues; i++)
1437 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1438 			    i, priv->tx_queue[i]->tx_ring_size);
1439 
1440 	return 0;
1441 
1442 register_fail:
1443 	unmap_group_regs(priv);
1444 	gfar_free_rx_queues(priv);
1445 	gfar_free_tx_queues(priv);
1446 	of_node_put(priv->phy_node);
1447 	of_node_put(priv->tbi_node);
1448 	free_gfar_dev(priv);
1449 	return err;
1450 }
1451 
1452 static int gfar_remove(struct platform_device *ofdev)
1453 {
1454 	struct gfar_private *priv = platform_get_drvdata(ofdev);
1455 
1456 	of_node_put(priv->phy_node);
1457 	of_node_put(priv->tbi_node);
1458 
1459 	unregister_netdev(priv->ndev);
1460 	unmap_group_regs(priv);
1461 	gfar_free_rx_queues(priv);
1462 	gfar_free_tx_queues(priv);
1463 	free_gfar_dev(priv);
1464 
1465 	return 0;
1466 }
1467 
1468 #ifdef CONFIG_PM
1469 
1470 static int gfar_suspend(struct device *dev)
1471 {
1472 	struct gfar_private *priv = dev_get_drvdata(dev);
1473 	struct net_device *ndev = priv->ndev;
1474 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1475 	unsigned long flags;
1476 	u32 tempval;
1477 
1478 	int magic_packet = priv->wol_en &&
1479 			   (priv->device_flags &
1480 			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1481 
1482 	netif_device_detach(ndev);
1483 
1484 	if (netif_running(ndev)) {
1485 
1486 		local_irq_save(flags);
1487 		lock_tx_qs(priv);
1488 
1489 		gfar_halt_nodisable(priv);
1490 
1491 		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
1492 		tempval = gfar_read(&regs->maccfg1);
1493 
1494 		tempval &= ~MACCFG1_TX_EN;
1495 
1496 		if (!magic_packet)
1497 			tempval &= ~MACCFG1_RX_EN;
1498 
1499 		gfar_write(&regs->maccfg1, tempval);
1500 
1501 		unlock_tx_qs(priv);
1502 		local_irq_restore(flags);
1503 
1504 		disable_napi(priv);
1505 
1506 		if (magic_packet) {
1507 			/* Enable interrupt on Magic Packet */
1508 			gfar_write(&regs->imask, IMASK_MAG);
1509 
1510 			/* Enable Magic Packet mode */
1511 			tempval = gfar_read(&regs->maccfg2);
1512 			tempval |= MACCFG2_MPEN;
1513 			gfar_write(&regs->maccfg2, tempval);
1514 		} else {
1515 			phy_stop(priv->phydev);
1516 		}
1517 	}
1518 
1519 	return 0;
1520 }
1521 
1522 static int gfar_resume(struct device *dev)
1523 {
1524 	struct gfar_private *priv = dev_get_drvdata(dev);
1525 	struct net_device *ndev = priv->ndev;
1526 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1527 	unsigned long flags;
1528 	u32 tempval;
1529 	int magic_packet = priv->wol_en &&
1530 			   (priv->device_flags &
1531 			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1532 
1533 	if (!netif_running(ndev)) {
1534 		netif_device_attach(ndev);
1535 		return 0;
1536 	}
1537 
1538 	if (!magic_packet && priv->phydev)
1539 		phy_start(priv->phydev);
1540 
1541 	/* Disable Magic Packet mode, in case something
1542 	 * else woke us up.
1543 	 */
1544 	local_irq_save(flags);
1545 	lock_tx_qs(priv);
1546 
1547 	tempval = gfar_read(&regs->maccfg2);
1548 	tempval &= ~MACCFG2_MPEN;
1549 	gfar_write(&regs->maccfg2, tempval);
1550 
1551 	gfar_start(priv);
1552 
1553 	unlock_tx_qs(priv);
1554 	local_irq_restore(flags);
1555 
1556 	netif_device_attach(ndev);
1557 
1558 	enable_napi(priv);
1559 
1560 	return 0;
1561 }
1562 
1563 static int gfar_restore(struct device *dev)
1564 {
1565 	struct gfar_private *priv = dev_get_drvdata(dev);
1566 	struct net_device *ndev = priv->ndev;
1567 
1568 	if (!netif_running(ndev)) {
1569 		netif_device_attach(ndev);
1570 
1571 		return 0;
1572 	}
1573 
1574 	if (gfar_init_bds(ndev)) {
1575 		free_skb_resources(priv);
1576 		return -ENOMEM;
1577 	}
1578 
1579 	gfar_mac_reset(priv);
1580 
1581 	gfar_init_tx_rx_base(priv);
1582 
1583 	gfar_start(priv);
1584 
1585 	priv->oldlink = 0;
1586 	priv->oldspeed = 0;
1587 	priv->oldduplex = -1;
1588 
1589 	if (priv->phydev)
1590 		phy_start(priv->phydev);
1591 
1592 	netif_device_attach(ndev);
1593 	enable_napi(priv);
1594 
1595 	return 0;
1596 }
1597 
1598 static struct dev_pm_ops gfar_pm_ops = {
1599 	.suspend = gfar_suspend,
1600 	.resume = gfar_resume,
1601 	.freeze = gfar_suspend,
1602 	.thaw = gfar_resume,
1603 	.restore = gfar_restore,
1604 };
1605 
1606 #define GFAR_PM_OPS (&gfar_pm_ops)
1607 
1608 #else
1609 
1610 #define GFAR_PM_OPS NULL
1611 
1612 #endif
1613 
1614 /* Reads the controller's registers to determine what interface
1615  * connects it to the PHY.
1616  */
1617 static phy_interface_t gfar_get_interface(struct net_device *dev)
1618 {
1619 	struct gfar_private *priv = netdev_priv(dev);
1620 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1621 	u32 ecntrl;
1622 
1623 	ecntrl = gfar_read(&regs->ecntrl);
1624 
1625 	if (ecntrl & ECNTRL_SGMII_MODE)
1626 		return PHY_INTERFACE_MODE_SGMII;
1627 
1628 	if (ecntrl & ECNTRL_TBI_MODE) {
1629 		if (ecntrl & ECNTRL_REDUCED_MODE)
1630 			return PHY_INTERFACE_MODE_RTBI;
1631 		else
1632 			return PHY_INTERFACE_MODE_TBI;
1633 	}
1634 
1635 	if (ecntrl & ECNTRL_REDUCED_MODE) {
1636 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1637 			return PHY_INTERFACE_MODE_RMII;
1638 		}
1639 		else {
1640 			phy_interface_t interface = priv->interface;
1641 
1642 			/* This isn't autodetected right now, so it must
1643 			 * be set by the device tree or platform code.
1644 			 */
1645 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1646 				return PHY_INTERFACE_MODE_RGMII_ID;
1647 
1648 			return PHY_INTERFACE_MODE_RGMII;
1649 		}
1650 	}
1651 
1652 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1653 		return PHY_INTERFACE_MODE_GMII;
1654 
1655 	return PHY_INTERFACE_MODE_MII;
1656 }
1657 
1658 
1659 /* Initializes driver's PHY state, and attaches to the PHY.
1660  * Returns 0 on success.
1661  */
1662 static int init_phy(struct net_device *dev)
1663 {
1664 	struct gfar_private *priv = netdev_priv(dev);
1665 	uint gigabit_support =
1666 		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1667 		GFAR_SUPPORTED_GBIT : 0;
1668 	phy_interface_t interface;
1669 
1670 	priv->oldlink = 0;
1671 	priv->oldspeed = 0;
1672 	priv->oldduplex = -1;
1673 
1674 	interface = gfar_get_interface(dev);
1675 
1676 	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1677 				      interface);
1678 	if (!priv->phydev) {
1679 		dev_err(&dev->dev, "could not attach to PHY\n");
1680 		return -ENODEV;
1681 	}
1682 
1683 	if (interface == PHY_INTERFACE_MODE_SGMII)
1684 		gfar_configure_serdes(dev);
1685 
1686 	/* Remove any features not supported by the controller */
1687 	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1688 	priv->phydev->advertising = priv->phydev->supported;
1689 
1690 	return 0;
1691 }
1692 
1693 /* Initialize TBI PHY interface for communicating with the
1694  * SERDES lynx PHY on the chip.  We communicate with this PHY
1695  * through the MDIO bus on each controller, treating it as a
1696  * "normal" PHY at the address found in the TBIPA register.  We assume
1697  * that the TBIPA register is valid.  Either the MDIO bus code will set
1698  * it to a value that doesn't conflict with other PHYs on the bus, or the
1699  * value doesn't matter, as there are no other PHYs on the bus.
1700  */
1701 static void gfar_configure_serdes(struct net_device *dev)
1702 {
1703 	struct gfar_private *priv = netdev_priv(dev);
1704 	struct phy_device *tbiphy;
1705 
1706 	if (!priv->tbi_node) {
1707 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
1708 				    "device tree specify a tbi-handle\n");
1709 		return;
1710 	}
1711 
1712 	tbiphy = of_phy_find_device(priv->tbi_node);
1713 	if (!tbiphy) {
1714 		dev_err(&dev->dev, "error: Could not get TBI device\n");
1715 		return;
1716 	}
1717 
1718 	/* If the link is already up, we must already be ok, and don't need to
1719 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1720 	 * everything for us?  Resetting it takes the link down and requires
1721 	 * several seconds for it to come back.
1722 	 */
1723 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1724 		return;
1725 
	/* Single clk mode, mii mode off (for serdes communication) */
1727 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1728 
1729 	phy_write(tbiphy, MII_ADVERTISE,
1730 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1731 		  ADVERTISE_1000XPSE_ASYM);
1732 
1733 	phy_write(tbiphy, MII_BMCR,
1734 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1735 		  BMCR_SPEED1000);
1736 }
1737 
1738 static int __gfar_is_rx_idle(struct gfar_private *priv)
1739 {
1740 	u32 res;
1741 
	/* Normally the TSEC should not hang on GRS commands, so we should
	 * actually wait for the IEVENT_GRSC flag.
1744 	 */
1745 	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1746 		return 0;
1747 
1748 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1749 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1750 	 * and the Rx can be safely reset.
1751 	 */
1752 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1753 	res &= 0x7f807f80;
1754 	if ((res & 0xffff) == (res >> 16))
1755 		return 1;
1756 
1757 	return 0;
1758 }
1759 
1760 /* Halt the receive and transmit queues */
1761 static void gfar_halt_nodisable(struct gfar_private *priv)
1762 {
1763 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1764 	u32 tempval;
1765 	unsigned int timeout;
1766 	int stopped;
1767 
1768 	gfar_ints_disable(priv);
1769 
1770 	if (gfar_is_dma_stopped(priv))
1771 		return;
1772 
1773 	/* Stop the DMA, and wait for it to stop */
1774 	tempval = gfar_read(&regs->dmactrl);
1775 	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1776 	gfar_write(&regs->dmactrl, tempval);
1777 
1778 retry:
1779 	timeout = 1000;
1780 	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1781 		cpu_relax();
1782 		timeout--;
1783 	}
1784 
1785 	if (!timeout)
1786 		stopped = gfar_is_dma_stopped(priv);
1787 
1788 	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1789 	    !__gfar_is_rx_idle(priv))
1790 		goto retry;
1791 }
1792 
1793 /* Halt the receive and transmit queues */
1794 void gfar_halt(struct gfar_private *priv)
1795 {
1796 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1797 	u32 tempval;
1798 
	/* Disable the Rx/Tx hw queues */
1800 	gfar_write(&regs->rqueue, 0);
1801 	gfar_write(&regs->tqueue, 0);
1802 
1803 	mdelay(10);
1804 
1805 	gfar_halt_nodisable(priv);
1806 
1807 	/* Disable Rx/Tx DMA */
1808 	tempval = gfar_read(&regs->maccfg1);
1809 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1810 	gfar_write(&regs->maccfg1, tempval);
1811 }
1812 
1813 void stop_gfar(struct net_device *dev)
1814 {
1815 	struct gfar_private *priv = netdev_priv(dev);
1816 
1817 	netif_tx_stop_all_queues(dev);
1818 
1819 	smp_mb__before_atomic();
1820 	set_bit(GFAR_DOWN, &priv->state);
1821 	smp_mb__after_atomic();
1822 
1823 	disable_napi(priv);
1824 
1825 	/* disable ints and gracefully shut down Rx/Tx DMA */
1826 	gfar_halt(priv);
1827 
1828 	phy_stop(priv->phydev);
1829 
1830 	free_skb_resources(priv);
1831 }
1832 
1833 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1834 {
1835 	struct txbd8 *txbdp;
1836 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1837 	int i, j;
1838 
1839 	txbdp = tx_queue->tx_bd_base;
1840 
1841 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1842 		if (!tx_queue->tx_skbuff[i])
1843 			continue;
1844 
1845 		dma_unmap_single(priv->dev, txbdp->bufPtr,
1846 				 txbdp->length, DMA_TO_DEVICE);
1847 		txbdp->lstatus = 0;
1848 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1849 		     j++) {
1850 			txbdp++;
1851 			dma_unmap_page(priv->dev, txbdp->bufPtr,
1852 				       txbdp->length, DMA_TO_DEVICE);
1853 		}
1854 		txbdp++;
1855 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1856 		tx_queue->tx_skbuff[i] = NULL;
1857 	}
1858 	kfree(tx_queue->tx_skbuff);
1859 	tx_queue->tx_skbuff = NULL;
1860 }
1861 
1862 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1863 {
1864 	struct rxbd8 *rxbdp;
1865 	struct gfar_private *priv = netdev_priv(rx_queue->dev);
1866 	int i;
1867 
1868 	rxbdp = rx_queue->rx_bd_base;
1869 
1870 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
1871 		if (rx_queue->rx_skbuff[i]) {
1872 			dma_unmap_single(priv->dev, rxbdp->bufPtr,
1873 					 priv->rx_buffer_size,
1874 					 DMA_FROM_DEVICE);
1875 			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1876 			rx_queue->rx_skbuff[i] = NULL;
1877 		}
1878 		rxbdp->lstatus = 0;
1879 		rxbdp->bufPtr = 0;
1880 		rxbdp++;
1881 	}
1882 	kfree(rx_queue->rx_skbuff);
1883 	rx_queue->rx_skbuff = NULL;
1884 }
1885 
1886 /* If there are any tx skbs or rx skbs still around, free them.
1887  * Then free tx_skbuff and rx_skbuff
1888  */
1889 static void free_skb_resources(struct gfar_private *priv)
1890 {
1891 	struct gfar_priv_tx_q *tx_queue = NULL;
1892 	struct gfar_priv_rx_q *rx_queue = NULL;
1893 	int i;
1894 
1895 	/* Go through all the buffer descriptors and free their data buffers */
1896 	for (i = 0; i < priv->num_tx_queues; i++) {
1897 		struct netdev_queue *txq;
1898 
1899 		tx_queue = priv->tx_queue[i];
1900 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1901 		if (tx_queue->tx_skbuff)
1902 			free_skb_tx_queue(tx_queue);
1903 		netdev_tx_reset_queue(txq);
1904 	}
1905 
1906 	for (i = 0; i < priv->num_rx_queues; i++) {
1907 		rx_queue = priv->rx_queue[i];
1908 		if (rx_queue->rx_skbuff)
1909 			free_skb_rx_queue(rx_queue);
1910 	}
1911 
1912 	dma_free_coherent(priv->dev,
1913 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1914 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1915 			  priv->tx_queue[0]->tx_bd_base,
1916 			  priv->tx_queue[0]->tx_bd_dma_base);
1917 }
1918 
1919 void gfar_start(struct gfar_private *priv)
1920 {
1921 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1922 	u32 tempval;
1923 	int i = 0;
1924 
1925 	/* Enable Rx/Tx hw queues */
1926 	gfar_write(&regs->rqueue, priv->rqueue);
1927 	gfar_write(&regs->tqueue, priv->tqueue);
1928 
1929 	/* Initialize DMACTRL to have WWR and WOP */
1930 	tempval = gfar_read(&regs->dmactrl);
1931 	tempval |= DMACTRL_INIT_SETTINGS;
1932 	gfar_write(&regs->dmactrl, tempval);
1933 
1934 	/* Make sure we aren't stopped */
1935 	tempval = gfar_read(&regs->dmactrl);
1936 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1937 	gfar_write(&regs->dmactrl, tempval);
1938 
1939 	for (i = 0; i < priv->num_grps; i++) {
1940 		regs = priv->gfargrp[i].regs;
1941 		/* Clear THLT/RHLT, so that the DMA starts polling now */
1942 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1943 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1944 	}
1945 
1946 	/* Enable Rx/Tx DMA */
1947 	tempval = gfar_read(&regs->maccfg1);
1948 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1949 	gfar_write(&regs->maccfg1, tempval);
1950 
1951 	gfar_ints_enable(priv);
1952 
1953 	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1954 }
1955 
1956 static void free_grp_irqs(struct gfar_priv_grp *grp)
1957 {
1958 	free_irq(gfar_irq(grp, TX)->irq, grp);
1959 	free_irq(gfar_irq(grp, RX)->irq, grp);
1960 	free_irq(gfar_irq(grp, ER)->irq, grp);
1961 }
1962 
1963 static int register_grp_irqs(struct gfar_priv_grp *grp)
1964 {
1965 	struct gfar_private *priv = grp->priv;
1966 	struct net_device *dev = priv->ndev;
1967 	int err;
1968 
1969 	/* If the device has multiple interrupts, register for
1970 	 * them.  Otherwise, only register for the one
1971 	 */
1972 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1973 		/* Install our interrupt handlers for Error,
1974 		 * Transmit, and Receive
1975 		 */
1976 		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1977 				  gfar_irq(grp, ER)->name, grp);
1978 		if (err < 0) {
1979 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1980 				  gfar_irq(grp, ER)->irq);
1981 
1982 			goto err_irq_fail;
1983 		}
1984 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1985 				  gfar_irq(grp, TX)->name, grp);
1986 		if (err < 0) {
1987 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1988 				  gfar_irq(grp, TX)->irq);
1989 			goto tx_irq_fail;
1990 		}
1991 		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
1992 				  gfar_irq(grp, RX)->name, grp);
1993 		if (err < 0) {
1994 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1995 				  gfar_irq(grp, RX)->irq);
1996 			goto rx_irq_fail;
1997 		}
1998 	} else {
1999 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2000 				  gfar_irq(grp, TX)->name, grp);
2001 		if (err < 0) {
2002 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2003 				  gfar_irq(grp, TX)->irq);
2004 			goto err_irq_fail;
2005 		}
2006 	}
2007 
2008 	return 0;
2009 
2010 rx_irq_fail:
2011 	free_irq(gfar_irq(grp, TX)->irq, grp);
2012 tx_irq_fail:
2013 	free_irq(gfar_irq(grp, ER)->irq, grp);
2014 err_irq_fail:
2015 	return err;
2017 }
2018 
2019 static void gfar_free_irq(struct gfar_private *priv)
2020 {
2021 	int i;
2022 
2023 	/* Free the IRQs */
2024 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2025 		for (i = 0; i < priv->num_grps; i++)
2026 			free_grp_irqs(&priv->gfargrp[i]);
2027 	} else {
2028 		for (i = 0; i < priv->num_grps; i++)
2029 			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2030 				 &priv->gfargrp[i]);
2031 	}
2032 }
2033 
2034 static int gfar_request_irq(struct gfar_private *priv)
2035 {
2036 	int err, i, j;
2037 
2038 	for (i = 0; i < priv->num_grps; i++) {
2039 		err = register_grp_irqs(&priv->gfargrp[i]);
2040 		if (err) {
2041 			for (j = 0; j < i; j++)
2042 				free_grp_irqs(&priv->gfargrp[j]);
2043 			return err;
2044 		}
2045 	}
2046 
2047 	return 0;
2048 }
2049 
2050 /* Bring the controller up and running */
2051 int startup_gfar(struct net_device *ndev)
2052 {
2053 	struct gfar_private *priv = netdev_priv(ndev);
2054 	int err;
2055 
2056 	gfar_mac_reset(priv);
2057 
2058 	err = gfar_alloc_skb_resources(ndev);
2059 	if (err)
2060 		return err;
2061 
2062 	gfar_init_tx_rx_base(priv);
2063 
2064 	smp_mb__before_atomic();
2065 	clear_bit(GFAR_DOWN, &priv->state);
2066 	smp_mb__after_atomic();
2067 
2068 	/* Start Rx/Tx DMA and enable the interrupts */
2069 	gfar_start(priv);
2070 
2071 	phy_start(priv->phydev);
2072 
2073 	enable_napi(priv);
2074 
2075 	netif_tx_wake_all_queues(ndev);
2076 
2077 	return 0;
2078 }
2079 
2080 /* Called when something needs to use the ethernet device
2081  * Returns 0 for success.
2082  */
2083 static int gfar_enet_open(struct net_device *dev)
2084 {
2085 	struct gfar_private *priv = netdev_priv(dev);
2086 	int err;
2087 
2088 	err = init_phy(dev);
2089 	if (err)
2090 		return err;
2091 
2092 	err = gfar_request_irq(priv);
2093 	if (err)
2094 		return err;
2095 
2096 	err = startup_gfar(dev);
2097 	if (err)
2098 		return err;
2099 
2100 	device_set_wakeup_enable(&dev->dev, priv->wol_en);
2101 
2102 	return err;
2103 }
2104 
2105 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2106 {
2107 	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2108 
2109 	memset(fcb, 0, GMAC_FCB_LEN);
2110 
2111 	return fcb;
2112 }
2113 
2114 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2115 				    int fcb_length)
2116 {
2117 	/* If we're here, it's an IP packet with a TCP or UDP
2118 	 * payload.  We set it up for checksum offload, using the
2119 	 * pseudo-header checksum we provide.
2120 	 */
2121 	u8 flags = TXFCB_DEFAULT;
2122 
2123 	/* Tell the controller what the protocol is,
2124 	 * and provide the already-calculated pseudo-header checksum (phcs)
2125 	 */
2126 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2127 		flags |= TXFCB_UDP;
2128 		fcb->phcs = udp_hdr(skb)->check;
2129 	} else
2130 		fcb->phcs = tcp_hdr(skb)->check;
2131 
2132 	/* l3os is the distance between the start of the
2133 	 * frame (skb->data) and the start of the IP hdr.
2134 	 * l4os is the distance between the start of the
2135 	 * l3 hdr and the l4 hdr
2136 	 */
2137 	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2138 	fcb->l4os = skb_network_header_len(skb);
2139 
2140 	fcb->flags = flags;
2141 }
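
/* Worked example (illustrative, assuming a plain untagged IPv4/TCP frame
 * and the usual 8-byte FCB): after gfar_add_fcb() has pushed the FCB,
 * skb_network_offset() is ETH_HLEN + 8 = 22, so l3os = 22 - 8 = 14, i.e.
 * the Ethernet header that precedes the IP header.  l4os is the IP header
 * length as reported by skb_network_header_len(), typically 20 when no IP
 * options are present.
 */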
2142 
2143 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2144 {
2145 	fcb->flags |= TXFCB_VLN;
2146 	fcb->vlctl = vlan_tx_tag_get(skb);
2147 }
2148 
2149 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2150 				      struct txbd8 *base, int ring_size)
2151 {
2152 	struct txbd8 *new_bd = bdp + stride;
2153 
2154 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2155 }
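
/* For example (illustrative): with a 256-entry Tx ring, skipping 3 BDs
 * from index 254 would land on 257, which the check above folds back to
 * index 1, so callers never walk past the end of the ring.
 */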
2156 
2157 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2158 				      int ring_size)
2159 {
2160 	return skip_txbd(bdp, 1, base, ring_size);
2161 }
2162 
2163 /* eTSEC12: csum generation not supported for some fcb offsets */
2164 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2165 				       unsigned long fcb_addr)
2166 {
2167 	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2168 	       (fcb_addr % 0x20) > 0x18);
2169 }
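
/* In other words (illustrative): the workaround is needed only when the
 * FCB would start within the last 7 bytes of a 32-byte region, i.e. at an
 * offset of 0x19..0x1f modulo 0x20; an FCB starting at offset 0x18 or
 * below is unaffected.
 */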
2170 
2171 /* eTSEC76: csum generation for frames larger than 2500 bytes may
2172  * cause excess delays before the start of transmission
2173  */
2174 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2175 				       unsigned int len)
2176 {
2177 	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2178 	       (len > 2500));
2179 }
2180 
2181 /* This is called by the kernel when a frame is ready for transmission.
2182  * It is pointed to by the dev->hard_start_xmit function pointer
2183  */
2184 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2185 {
2186 	struct gfar_private *priv = netdev_priv(dev);
2187 	struct gfar_priv_tx_q *tx_queue = NULL;
2188 	struct netdev_queue *txq;
2189 	struct gfar __iomem *regs = NULL;
2190 	struct txfcb *fcb = NULL;
2191 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2192 	u32 lstatus;
2193 	int i, rq = 0;
2194 	int do_tstamp, do_csum, do_vlan;
2195 	u32 bufaddr;
2196 	unsigned long flags;
2197 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2198 
2199 	rq = skb->queue_mapping;
2200 	tx_queue = priv->tx_queue[rq];
2201 	txq = netdev_get_tx_queue(dev, rq);
2202 	base = tx_queue->tx_bd_base;
2203 	regs = tx_queue->grp->regs;
2204 
2205 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2206 	do_vlan = vlan_tx_tag_present(skb);
2207 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2208 		    priv->hwts_tx_en;
2209 
2210 	if (do_csum || do_vlan)
2211 		fcb_len = GMAC_FCB_LEN;
2212 
2213 	/* check if time stamp should be generated */
2214 	if (unlikely(do_tstamp))
2215 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2216 
2217 	/* make space for additional header when fcb is needed */
2218 	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2219 		struct sk_buff *skb_new;
2220 
2221 		skb_new = skb_realloc_headroom(skb, fcb_len);
2222 		if (!skb_new) {
2223 			dev->stats.tx_errors++;
2224 			dev_kfree_skb_any(skb);
2225 			return NETDEV_TX_OK;
2226 		}
2227 
2228 		if (skb->sk)
2229 			skb_set_owner_w(skb_new, skb->sk);
2230 		dev_consume_skb_any(skb);
2231 		skb = skb_new;
2232 	}
2233 
2234 	/* total number of fragments in the SKB */
2235 	nr_frags = skb_shinfo(skb)->nr_frags;
2236 
2237 	/* calculate the required number of TxBDs for this skb */
2238 	if (unlikely(do_tstamp))
2239 		nr_txbds = nr_frags + 2;
2240 	else
2241 		nr_txbds = nr_frags + 1;
2242 
2243 	/* check if there is space to queue this packet */
2244 	if (nr_txbds > tx_queue->num_txbdfree) {
2245 		/* no space, stop the queue */
2246 		netif_tx_stop_queue(txq);
2247 		dev->stats.tx_fifo_errors++;
2248 		return NETDEV_TX_BUSY;
2249 	}
2250 
2251 	/* Update transmit stats */
2252 	bytes_sent = skb->len;
2253 	tx_queue->stats.tx_bytes += bytes_sent;
2254 	/* keep Tx bytes on wire for BQL accounting */
2255 	GFAR_CB(skb)->bytes_sent = bytes_sent;
2256 	tx_queue->stats.tx_packets++;
2257 
2258 	txbdp = txbdp_start = tx_queue->cur_tx;
2259 	lstatus = txbdp->lstatus;
2260 
2261 	/* Time stamp insertion requires one additional TxBD */
2262 	if (unlikely(do_tstamp))
2263 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2264 						 tx_queue->tx_ring_size);
2265 
2266 	if (nr_frags == 0) {
2267 		if (unlikely(do_tstamp))
2268 			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2269 							  TXBD_INTERRUPT);
2270 		else
2271 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2272 	} else {
2273 		/* Place the fragment addresses and lengths into the TxBDs */
2274 		for (i = 0; i < nr_frags; i++) {
2275 			unsigned int frag_len;
2276 			/* Point at the next BD, wrapping as needed */
2277 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2278 
2279 			frag_len = skb_shinfo(skb)->frags[i].size;
2280 
2281 			lstatus = txbdp->lstatus | frag_len |
2282 				  BD_LFLAG(TXBD_READY);
2283 
2284 			/* Handle the last BD specially */
2285 			if (i == nr_frags - 1)
2286 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2287 
2288 			bufaddr = skb_frag_dma_map(priv->dev,
2289 						   &skb_shinfo(skb)->frags[i],
2290 						   0,
2291 						   frag_len,
2292 						   DMA_TO_DEVICE);
2293 
2294 			/* set the TxBD length and buffer pointer */
2295 			txbdp->bufPtr = bufaddr;
2296 			txbdp->lstatus = lstatus;
2297 		}
2298 
2299 		lstatus = txbdp_start->lstatus;
2300 	}
2301 
2302 	/* Add TxPAL between FCB and frame if required */
2303 	if (unlikely(do_tstamp)) {
2304 		skb_push(skb, GMAC_TXPAL_LEN);
2305 		memset(skb->data, 0, GMAC_TXPAL_LEN);
2306 	}
2307 
2308 	/* Add TxFCB if required */
2309 	if (fcb_len) {
2310 		fcb = gfar_add_fcb(skb);
2311 		lstatus |= BD_LFLAG(TXBD_TOE);
2312 	}
2313 
2314 	/* Set up checksumming */
2315 	if (do_csum) {
2316 		gfar_tx_checksum(skb, fcb, fcb_len);
2317 
2318 		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2319 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
2320 			__skb_pull(skb, GMAC_FCB_LEN);
2321 			skb_checksum_help(skb);
2322 			if (do_vlan || do_tstamp) {
2323 				/* put back a new fcb for vlan/tstamp TOE */
2324 				fcb = gfar_add_fcb(skb);
2325 			} else {
2326 				/* Tx TOE not used */
2327 				lstatus &= ~(BD_LFLAG(TXBD_TOE));
2328 				fcb = NULL;
2329 			}
2330 		}
2331 	}
2332 
2333 	if (do_vlan)
2334 		gfar_tx_vlan(skb, fcb);
2335 
2336 	/* Setup tx hardware time stamping if requested */
2337 	if (unlikely(do_tstamp)) {
2338 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2339 		fcb->ptp = 1;
2340 	}
2341 
2342 	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
2343 					     skb_headlen(skb), DMA_TO_DEVICE);
2344 
2345 	/* If time stamping is requested, one additional TxBD must be set up.
2346 	 * The first TxBD points to the FCB and must have a data length of
2347 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2348 	 * the full frame length.
2349 	 */
2350 	if (unlikely(do_tstamp)) {
2351 		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
2352 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2353 					 (skb_headlen(skb) - fcb_len);
2354 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2355 	} else {
2356 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2357 	}
2358 
2359 	netdev_tx_sent_queue(txq, bytes_sent);
2360 
2361 	/* We can work in parallel with gfar_clean_tx_ring(), except
2362 	 * when modifying num_txbdfree. Note that we didn't grab the lock
2363 	 * when we were reading num_txbdfree and checking for available
2364 	 * space; that's because outside of this function it can only grow,
2365 	 * and once we've got the needed space, it cannot suddenly disappear.
2366 	 *
2367 	 * The lock also protects us from gfar_error(), which can modify
2368 	 * regs->tstat and thus retrigger the transfers, which is why we
2369 	 * also must grab the lock before setting the ready bit for the
2370 	 * first BD to be transmitted.
2371 	 */
2372 	spin_lock_irqsave(&tx_queue->txlock, flags);
2373 
2374 	gfar_wmb();
2375 
2376 	txbdp_start->lstatus = lstatus;
2377 
2378 	gfar_wmb(); /* force lstatus write before tx_skbuff */
2379 
2380 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2381 
2382 	/* Update the current skb pointer to the next entry we will use
2383 	 * (wrapping if necessary)
2384 	 */
2385 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2386 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2387 
2388 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2389 
2390 	/* reduce TxBD free count */
2391 	tx_queue->num_txbdfree -= (nr_txbds);
2392 
2393 	/* If the next BD still needs to be cleaned up, then the bds
2394 	 * are full.  We need to tell the kernel to stop sending us stuff.
2395 	 */
2396 	if (!tx_queue->num_txbdfree) {
2397 		netif_tx_stop_queue(txq);
2398 
2399 		dev->stats.tx_fifo_errors++;
2400 	}
2401 
2402 	/* Tell the DMA to go go go */
2403 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2404 
2405 	/* Unlock priv */
2406 	spin_unlock_irqrestore(&tx_queue->txlock, flags);
2407 
2408 	return NETDEV_TX_OK;
2409 }
2410 
2411 /* Stops the kernel queue, and halts the controller */
2412 static int gfar_close(struct net_device *dev)
2413 {
2414 	struct gfar_private *priv = netdev_priv(dev);
2415 
2416 	cancel_work_sync(&priv->reset_task);
2417 	stop_gfar(dev);
2418 
2419 	/* Disconnect from the PHY */
2420 	phy_disconnect(priv->phydev);
2421 	priv->phydev = NULL;
2422 
2423 	gfar_free_irq(priv);
2424 
2425 	return 0;
2426 }
2427 
2428 /* Changes the mac address if the controller is not running. */
2429 static int gfar_set_mac_address(struct net_device *dev)
2430 {
2431 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2432 
2433 	return 0;
2434 }
2435 
2436 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2437 {
2438 	struct gfar_private *priv = netdev_priv(dev);
2439 	int frame_size = new_mtu + ETH_HLEN;
2440 
2441 	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2442 		netif_err(priv, drv, dev, "Invalid MTU setting\n");
2443 		return -EINVAL;
2444 	}
2445 
2446 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2447 		cpu_relax();
2448 
2449 	if (dev->flags & IFF_UP)
2450 		stop_gfar(dev);
2451 
2452 	dev->mtu = new_mtu;
2453 
2454 	if (dev->flags & IFF_UP)
2455 		startup_gfar(dev);
2456 
2457 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2458 
2459 	return 0;
2460 }
2461 
2462 void reset_gfar(struct net_device *ndev)
2463 {
2464 	struct gfar_private *priv = netdev_priv(ndev);
2465 
2466 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2467 		cpu_relax();
2468 
2469 	stop_gfar(ndev);
2470 	startup_gfar(ndev);
2471 
2472 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2473 }
2474 
2475 /* gfar_reset_task gets scheduled when a packet has not been
2476  * transmitted after a set amount of time.
2477  * For now, assume that clearing out all the structures and
2478  * starting over will fix the problem.
2479  */
2480 static void gfar_reset_task(struct work_struct *work)
2481 {
2482 	struct gfar_private *priv = container_of(work, struct gfar_private,
2483 						 reset_task);
2484 	reset_gfar(priv->ndev);
2485 }
2486 
2487 static void gfar_timeout(struct net_device *dev)
2488 {
2489 	struct gfar_private *priv = netdev_priv(dev);
2490 
2491 	dev->stats.tx_errors++;
2492 	schedule_work(&priv->reset_task);
2493 }
2494 
2495 static void gfar_align_skb(struct sk_buff *skb)
2496 {
2497 	/* The data buffer must be aligned properly, so reserve as many
2498 	 * bytes as needed to push skb->data up to the next alignment boundary
2499 	 */
2500 	skb_reserve(skb, RXBUF_ALIGNMENT -
2501 		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2502 }
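
/* Worked example (illustrative, assuming RXBUF_ALIGNMENT is a power of
 * two such as 64): if skb->data ends in ...0x24, the low bits are 0x24,
 * so 64 - 0x24 = 28 bytes are reserved and the data pointer lands on the
 * next 64-byte boundary.  Note that an already-aligned pointer still gets
 * a full RXBUF_ALIGNMENT-sized reserve, which is why gfar_alloc_skb()
 * over-allocates by RXBUF_ALIGNMENT bytes.
 */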
2503 
2504 /* Clean up completed Tx descriptors (called from the Tx NAPI poll) */
2505 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2506 {
2507 	struct net_device *dev = tx_queue->dev;
2508 	struct netdev_queue *txq;
2509 	struct gfar_private *priv = netdev_priv(dev);
2510 	struct txbd8 *bdp, *next = NULL;
2511 	struct txbd8 *lbdp = NULL;
2512 	struct txbd8 *base = tx_queue->tx_bd_base;
2513 	struct sk_buff *skb;
2514 	int skb_dirtytx;
2515 	int tx_ring_size = tx_queue->tx_ring_size;
2516 	int frags = 0, nr_txbds = 0;
2517 	int i;
2518 	int howmany = 0;
2519 	int tqi = tx_queue->qindex;
2520 	unsigned int bytes_sent = 0;
2521 	u32 lstatus;
2522 	size_t buflen;
2523 
2524 	txq = netdev_get_tx_queue(dev, tqi);
2525 	bdp = tx_queue->dirty_tx;
2526 	skb_dirtytx = tx_queue->skb_dirtytx;
2527 
2528 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2529 		unsigned long flags;
2530 
2531 		frags = skb_shinfo(skb)->nr_frags;
2532 
2533 		/* When time stamping, one additional TxBD must be freed.
2534 		 * Also, we need to dma_unmap_single() the TxPAL.
2535 		 */
2536 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2537 			nr_txbds = frags + 2;
2538 		else
2539 			nr_txbds = frags + 1;
2540 
2541 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2542 
2543 		lstatus = lbdp->lstatus;
2544 
2545 		/* Only clean completed frames */
2546 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2547 		    (lstatus & BD_LENGTH_MASK))
2548 			break;
2549 
2550 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2551 			next = next_txbd(bdp, base, tx_ring_size);
2552 			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2553 		} else
2554 			buflen = bdp->length;
2555 
2556 		dma_unmap_single(priv->dev, bdp->bufPtr,
2557 				 buflen, DMA_TO_DEVICE);
2558 
2559 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2560 			struct skb_shared_hwtstamps shhwtstamps;
2561 			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2562 
2563 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2564 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2565 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2566 			skb_tstamp_tx(skb, &shhwtstamps);
2567 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2568 			bdp = next;
2569 		}
2570 
2571 		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2572 		bdp = next_txbd(bdp, base, tx_ring_size);
2573 
2574 		for (i = 0; i < frags; i++) {
2575 			dma_unmap_page(priv->dev, bdp->bufPtr,
2576 				       bdp->length, DMA_TO_DEVICE);
2577 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2578 			bdp = next_txbd(bdp, base, tx_ring_size);
2579 		}
2580 
2581 		bytes_sent += GFAR_CB(skb)->bytes_sent;
2582 
2583 		dev_kfree_skb_any(skb);
2584 
2585 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2586 
2587 		skb_dirtytx = (skb_dirtytx + 1) &
2588 			      TX_RING_MOD_MASK(tx_ring_size);
2589 
2590 		howmany++;
2591 		spin_lock_irqsave(&tx_queue->txlock, flags);
2592 		tx_queue->num_txbdfree += nr_txbds;
2593 		spin_unlock_irqrestore(&tx_queue->txlock, flags);
2594 	}
2595 
2596 	/* If we freed a buffer, we can restart transmission, if necessary */
2597 	if (tx_queue->num_txbdfree &&
2598 	    netif_tx_queue_stopped(txq) &&
2599 	    !(test_bit(GFAR_DOWN, &priv->state)))
2600 		netif_wake_subqueue(priv->ndev, tqi);
2601 
2602 	/* Update dirty indicators */
2603 	tx_queue->skb_dirtytx = skb_dirtytx;
2604 	tx_queue->dirty_tx = bdp;
2605 
2606 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2607 }
2608 
2609 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2610 			   struct sk_buff *skb)
2611 {
2612 	struct net_device *dev = rx_queue->dev;
2613 	struct gfar_private *priv = netdev_priv(dev);
2614 	dma_addr_t buf;
2615 
2616 	buf = dma_map_single(priv->dev, skb->data,
2617 			     priv->rx_buffer_size, DMA_FROM_DEVICE);
2618 	gfar_init_rxbdp(rx_queue, bdp, buf);
2619 }
2620 
2621 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2622 {
2623 	struct gfar_private *priv = netdev_priv(dev);
2624 	struct sk_buff *skb;
2625 
2626 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2627 	if (!skb)
2628 		return NULL;
2629 
2630 	gfar_align_skb(skb);
2631 
2632 	return skb;
2633 }
2634 
2635 struct sk_buff *gfar_new_skb(struct net_device *dev)
2636 {
2637 	return gfar_alloc_skb(dev);
2638 }
2639 
2640 static inline void count_errors(unsigned short status, struct net_device *dev)
2641 {
2642 	struct gfar_private *priv = netdev_priv(dev);
2643 	struct net_device_stats *stats = &dev->stats;
2644 	struct gfar_extra_stats *estats = &priv->extra_stats;
2645 
2646 	/* If the packet was truncated, none of the other errors matter */
2647 	if (status & RXBD_TRUNCATED) {
2648 		stats->rx_length_errors++;
2649 
2650 		atomic64_inc(&estats->rx_trunc);
2651 
2652 		return;
2653 	}
2654 	/* Count the errors, if there were any */
2655 	if (status & (RXBD_LARGE | RXBD_SHORT)) {
2656 		stats->rx_length_errors++;
2657 
2658 		if (status & RXBD_LARGE)
2659 			atomic64_inc(&estats->rx_large);
2660 		else
2661 			atomic64_inc(&estats->rx_short);
2662 	}
2663 	if (status & RXBD_NONOCTET) {
2664 		stats->rx_frame_errors++;
2665 		atomic64_inc(&estats->rx_nonoctet);
2666 	}
2667 	if (status & RXBD_CRCERR) {
2668 		atomic64_inc(&estats->rx_crcerr);
2669 		stats->rx_crc_errors++;
2670 	}
2671 	if (status & RXBD_OVERRUN) {
2672 		atomic64_inc(&estats->rx_overrun);
2673 		stats->rx_crc_errors++;
2674 	}
2675 }
2676 
2677 irqreturn_t gfar_receive(int irq, void *grp_id)
2678 {
2679 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2680 	unsigned long flags;
2681 	u32 imask;
2682 
2683 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2684 		spin_lock_irqsave(&grp->grplock, flags);
2685 		imask = gfar_read(&grp->regs->imask);
2686 		imask &= IMASK_RX_DISABLED;
2687 		gfar_write(&grp->regs->imask, imask);
2688 		spin_unlock_irqrestore(&grp->grplock, flags);
2689 		__napi_schedule(&grp->napi_rx);
2690 	} else {
2691 		/* Clear IEVENT, so interrupts aren't called again
2692 		 * because of the packets that have already arrived.
2693 		 */
2694 		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2695 	}
2696 
2697 	return IRQ_HANDLED;
2698 }
2699 
2700 /* Interrupt Handler for Transmit complete */
2701 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2702 {
2703 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2704 	unsigned long flags;
2705 	u32 imask;
2706 
2707 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2708 		spin_lock_irqsave(&grp->grplock, flags);
2709 		imask = gfar_read(&grp->regs->imask);
2710 		imask &= IMASK_TX_DISABLED;
2711 		gfar_write(&grp->regs->imask, imask);
2712 		spin_unlock_irqrestore(&grp->grplock, flags);
2713 		__napi_schedule(&grp->napi_tx);
2714 	} else {
2715 		/* Clear IEVENT, so interrupts aren't called again
2716 		 * because of the packets that have already arrived.
2717 		 */
2718 		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2719 	}
2720 
2721 	return IRQ_HANDLED;
2722 }
2723 
2724 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2725 {
2726 	/* If valid headers were found, and valid sums were verified,
2727 	 * then we tell the kernel that no checksumming is necessary.
2728 	 * Otherwise, leave the checksum unverified so the stack checks it.
2729 	 */
2730 	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2731 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2732 	else
2733 		skb_checksum_none_assert(skb);
2734 }
2735 
2736 
2737 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2738 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2739 			       int amount_pull, struct napi_struct *napi)
2740 {
2741 	struct gfar_private *priv = netdev_priv(dev);
2742 	struct rxfcb *fcb = NULL;
2743 
2744 	/* The FCB, if present, is at the beginning of the buffer */
2745 	fcb = (struct rxfcb *)skb->data;
2746 
2747 	/* Remove the FCB from the skb
2748 	 * Remove the padded bytes, if there are any
2749 	 */
2750 	if (amount_pull) {
2751 		skb_record_rx_queue(skb, fcb->rq);
2752 		skb_pull(skb, amount_pull);
2753 	}
2754 
2755 	/* Get receive timestamp from the skb */
2756 	if (priv->hwts_rx_en) {
2757 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2758 		u64 *ns = (u64 *) skb->data;
2759 
2760 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2761 		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2762 	}
2763 
2764 	if (priv->padding)
2765 		skb_pull(skb, priv->padding);
2766 
2767 	if (dev->features & NETIF_F_RXCSUM)
2768 		gfar_rx_checksum(skb, fcb);
2769 
2770 	/* Tell the skb what kind of packet this is */
2771 	skb->protocol = eth_type_trans(skb, dev);
2772 
2773 	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2774 	 * Even if VLAN rx acceleration is disabled, on some chips
2775 	 * RXFCB_VLN is pseudo-randomly set.
2776 	 */
2777 	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2778 	    fcb->flags & RXFCB_VLN)
2779 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
2780 
2781 	/* Send the packet up the stack */
2782 	napi_gro_receive(napi, skb);
2783 
2784 }
2785 
2786 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2787  * until the budget/quota has been reached. Returns the number
2788  * of frames handled
2789  */
2790 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2791 {
2792 	struct net_device *dev = rx_queue->dev;
2793 	struct rxbd8 *bdp, *base;
2794 	struct sk_buff *skb;
2795 	int pkt_len;
2796 	int amount_pull;
2797 	int howmany = 0;
2798 	struct gfar_private *priv = netdev_priv(dev);
2799 
2800 	/* Get the first full descriptor */
2801 	bdp = rx_queue->cur_rx;
2802 	base = rx_queue->rx_bd_base;
2803 
2804 	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2805 
2806 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2807 		struct sk_buff *newskb;
2808 
2809 		rmb();
2810 
2811 		/* Add another skb for the future */
2812 		newskb = gfar_new_skb(dev);
2813 
2814 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2815 
2816 		dma_unmap_single(priv->dev, bdp->bufPtr,
2817 				 priv->rx_buffer_size, DMA_FROM_DEVICE);
2818 
2819 		if (unlikely(!(bdp->status & RXBD_ERR) &&
2820 			     bdp->length > priv->rx_buffer_size))
2821 			bdp->status = RXBD_LARGE;
2822 
2823 		/* We drop the frame if we failed to allocate a new buffer */
2824 		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2825 			     bdp->status & RXBD_ERR)) {
2826 			count_errors(bdp->status, dev);
2827 
2828 			if (unlikely(!newskb))
2829 				newskb = skb;
2830 			else if (skb)
2831 				dev_kfree_skb(skb);
2832 		} else {
2833 			/* Increment the number of packets */
2834 			rx_queue->stats.rx_packets++;
2835 			howmany++;
2836 
2837 			if (likely(skb)) {
2838 				/* Remove the FCS from the packet length */
2839 				pkt_len = bdp->length - ETH_FCS_LEN;
2840 				skb_put(skb, pkt_len);
2841 				rx_queue->stats.rx_bytes += pkt_len;
2842 				skb_record_rx_queue(skb, rx_queue->qindex);
2843 				gfar_process_frame(dev, skb, amount_pull,
2844 						   &rx_queue->grp->napi_rx);
2845 
2846 			} else {
2847 				netif_warn(priv, rx_err, dev, "Missing skb!\n");
2848 				rx_queue->stats.rx_dropped++;
2849 				atomic64_inc(&priv->extra_stats.rx_skbmissing);
2850 			}
2851 
2852 		}
2853 
2854 		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2855 
2856 		/* Setup the new bdp */
2857 		gfar_new_rxbdp(rx_queue, bdp, newskb);
2858 
2859 		/* Update to the next pointer */
2860 		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2861 
2862 		/* update to point at the next skb */
2863 		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2864 				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2865 	}
2866 
2867 	/* Update the current rxbd pointer to be the next one */
2868 	rx_queue->cur_rx = bdp;
2869 
2870 	return howmany;
2871 }
2872 
2873 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2874 {
2875 	struct gfar_priv_grp *gfargrp =
2876 		container_of(napi, struct gfar_priv_grp, napi_rx);
2877 	struct gfar __iomem *regs = gfargrp->regs;
2878 	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2879 	int work_done = 0;
2880 
2881 	/* Clear IEVENT, so interrupts aren't called again
2882 	 * because of the packets that have already arrived
2883 	 */
2884 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2885 
2886 	work_done = gfar_clean_rx_ring(rx_queue, budget);
2887 
2888 	if (work_done < budget) {
2889 		u32 imask;
2890 		napi_complete(napi);
2891 		/* Clear the halt bit in RSTAT */
2892 		gfar_write(&regs->rstat, gfargrp->rstat);
2893 
2894 		spin_lock_irq(&gfargrp->grplock);
2895 		imask = gfar_read(&regs->imask);
2896 		imask |= IMASK_RX_DEFAULT;
2897 		gfar_write(&regs->imask, imask);
2898 		spin_unlock_irq(&gfargrp->grplock);
2899 	}
2900 
2901 	return work_done;
2902 }
2903 
2904 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2905 {
2906 	struct gfar_priv_grp *gfargrp =
2907 		container_of(napi, struct gfar_priv_grp, napi_tx);
2908 	struct gfar __iomem *regs = gfargrp->regs;
2909 	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2910 	u32 imask;
2911 
2912 	/* Clear IEVENT, so interrupts aren't called again
2913 	 * because of the packets that have already arrived
2914 	 */
2915 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2916 
2917 	/* run Tx cleanup to completion */
2918 	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2919 		gfar_clean_tx_ring(tx_queue);
2920 
2921 	napi_complete(napi);
2922 
2923 	spin_lock_irq(&gfargrp->grplock);
2924 	imask = gfar_read(&regs->imask);
2925 	imask |= IMASK_TX_DEFAULT;
2926 	gfar_write(&regs->imask, imask);
2927 	spin_unlock_irq(&gfargrp->grplock);
2928 
2929 	return 0;
2930 }
2931 
2932 static int gfar_poll_rx(struct napi_struct *napi, int budget)
2933 {
2934 	struct gfar_priv_grp *gfargrp =
2935 		container_of(napi, struct gfar_priv_grp, napi_rx);
2936 	struct gfar_private *priv = gfargrp->priv;
2937 	struct gfar __iomem *regs = gfargrp->regs;
2938 	struct gfar_priv_rx_q *rx_queue = NULL;
2939 	int work_done = 0, work_done_per_q = 0;
2940 	int i, budget_per_q = 0;
2941 	unsigned long rstat_rxf;
2942 	int num_act_queues;
2943 
2944 	/* Clear IEVENT, so interrupts aren't called again
2945 	 * because of the packets that have already arrived
2946 	 */
2947 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2948 
2949 	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2950 
2951 	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2952 	if (num_act_queues)
2953 		budget_per_q = budget/num_act_queues;
2954 
2955 	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2956 		/* skip queue if not active */
2957 		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2958 			continue;
2959 
2960 		rx_queue = priv->rx_queue[i];
2961 		work_done_per_q =
2962 			gfar_clean_rx_ring(rx_queue, budget_per_q);
2963 		work_done += work_done_per_q;
2964 
2965 		/* finished processing this queue */
2966 		if (work_done_per_q < budget_per_q) {
2967 			/* clear active queue hw indication */
2968 			gfar_write(&regs->rstat,
2969 				   RSTAT_CLEAR_RXF0 >> i);
2970 			num_act_queues--;
2971 
2972 			if (!num_act_queues)
2973 				break;
2974 		}
2975 	}
2976 
2977 	if (!num_act_queues) {
2978 		u32 imask;
2979 		napi_complete(napi);
2980 
2981 		/* Clear the halt bit in RSTAT */
2982 		gfar_write(&regs->rstat, gfargrp->rstat);
2983 
2984 		spin_lock_irq(&gfargrp->grplock);
2985 		imask = gfar_read(&regs->imask);
2986 		imask |= IMASK_RX_DEFAULT;
2987 		gfar_write(&regs->imask, imask);
2988 		spin_unlock_irq(&gfargrp->grplock);
2989 	}
2990 
2991 	return work_done;
2992 }
2993 
2994 static int gfar_poll_tx(struct napi_struct *napi, int budget)
2995 {
2996 	struct gfar_priv_grp *gfargrp =
2997 		container_of(napi, struct gfar_priv_grp, napi_tx);
2998 	struct gfar_private *priv = gfargrp->priv;
2999 	struct gfar __iomem *regs = gfargrp->regs;
3000 	struct gfar_priv_tx_q *tx_queue = NULL;
3001 	int has_tx_work = 0;
3002 	int i;
3003 
3004 	/* Clear IEVENT, so interrupts aren't called again
3005 	 * because of the packets that have already arrived
3006 	 */
3007 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
3008 
3009 	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3010 		tx_queue = priv->tx_queue[i];
3011 		/* run Tx cleanup to completion */
3012 		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3013 			gfar_clean_tx_ring(tx_queue);
3014 			has_tx_work = 1;
3015 		}
3016 	}
3017 
3018 	if (!has_tx_work) {
3019 		u32 imask;
3020 		napi_complete(napi);
3021 
3022 		spin_lock_irq(&gfargrp->grplock);
3023 		imask = gfar_read(&regs->imask);
3024 		imask |= IMASK_TX_DEFAULT;
3025 		gfar_write(&regs->imask, imask);
3026 		spin_unlock_irq(&gfargrp->grplock);
3027 	}
3028 
3029 	return 0;
3030 }
3031 
3032 
3033 #ifdef CONFIG_NET_POLL_CONTROLLER
3034 /* Polling 'interrupt' - used by things like netconsole to send skbs
3035  * without having to re-enable interrupts. It's not called while
3036  * the interrupt routine is executing.
3037  */
3038 static void gfar_netpoll(struct net_device *dev)
3039 {
3040 	struct gfar_private *priv = netdev_priv(dev);
3041 	int i;
3042 
3043 	/* If the device has multiple interrupts, run tx/rx */
3044 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3045 		for (i = 0; i < priv->num_grps; i++) {
3046 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3047 
3048 			disable_irq(gfar_irq(grp, TX)->irq);
3049 			disable_irq(gfar_irq(grp, RX)->irq);
3050 			disable_irq(gfar_irq(grp, ER)->irq);
3051 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3052 			enable_irq(gfar_irq(grp, ER)->irq);
3053 			enable_irq(gfar_irq(grp, RX)->irq);
3054 			enable_irq(gfar_irq(grp, TX)->irq);
3055 		}
3056 	} else {
3057 		for (i = 0; i < priv->num_grps; i++) {
3058 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3059 
3060 			disable_irq(gfar_irq(grp, TX)->irq);
3061 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3062 			enable_irq(gfar_irq(grp, TX)->irq);
3063 		}
3064 	}
3065 }
3066 #endif
3067 
3068 /* The interrupt handler for devices with one interrupt */
3069 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3070 {
3071 	struct gfar_priv_grp *gfargrp = grp_id;
3072 
3073 	/* Save ievent for future reference */
3074 	u32 events = gfar_read(&gfargrp->regs->ievent);
3075 
3076 	/* Check for reception */
3077 	if (events & IEVENT_RX_MASK)
3078 		gfar_receive(irq, grp_id);
3079 
3080 	/* Check for transmit completion */
3081 	if (events & IEVENT_TX_MASK)
3082 		gfar_transmit(irq, grp_id);
3083 
3084 	/* Check for errors */
3085 	if (events & IEVENT_ERR_MASK)
3086 		gfar_error(irq, grp_id);
3087 
3088 	return IRQ_HANDLED;
3089 }
3090 
3091 /* Called every time the controller might need to be made
3092  * aware of new link state.  The PHY code conveys this
3093  * information through variables in the phydev structure, and this
3094  * function converts those variables into the appropriate
3095  * register values, and can bring down the device if needed.
3096  */
3097 static void adjust_link(struct net_device *dev)
3098 {
3099 	struct gfar_private *priv = netdev_priv(dev);
3100 	struct phy_device *phydev = priv->phydev;
3101 
3102 	if (unlikely(phydev->link != priv->oldlink ||
3103 		     phydev->duplex != priv->oldduplex ||
3104 		     phydev->speed != priv->oldspeed))
3105 		gfar_update_link_state(priv);
3106 }
3107 
3108 /* Update the hash table based on the current list of multicast
3109  * addresses we subscribe to.  Also, change the promiscuity of
3110  * the device based on the flags (this function is called
3111  * whenever dev->flags is changed).
3112  */
3113 static void gfar_set_multi(struct net_device *dev)
3114 {
3115 	struct netdev_hw_addr *ha;
3116 	struct gfar_private *priv = netdev_priv(dev);
3117 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3118 	u32 tempval;
3119 
3120 	if (dev->flags & IFF_PROMISC) {
3121 		/* Set RCTRL to PROM */
3122 		tempval = gfar_read(&regs->rctrl);
3123 		tempval |= RCTRL_PROM;
3124 		gfar_write(&regs->rctrl, tempval);
3125 	} else {
3126 		/* Set RCTRL to not PROM */
3127 		tempval = gfar_read(&regs->rctrl);
3128 		tempval &= ~(RCTRL_PROM);
3129 		gfar_write(&regs->rctrl, tempval);
3130 	}
3131 
3132 	if (dev->flags & IFF_ALLMULTI) {
3133 		/* Set the hash to rx all multicast frames */
3134 		gfar_write(&regs->igaddr0, 0xffffffff);
3135 		gfar_write(&regs->igaddr1, 0xffffffff);
3136 		gfar_write(&regs->igaddr2, 0xffffffff);
3137 		gfar_write(&regs->igaddr3, 0xffffffff);
3138 		gfar_write(&regs->igaddr4, 0xffffffff);
3139 		gfar_write(&regs->igaddr5, 0xffffffff);
3140 		gfar_write(&regs->igaddr6, 0xffffffff);
3141 		gfar_write(&regs->igaddr7, 0xffffffff);
3142 		gfar_write(&regs->gaddr0, 0xffffffff);
3143 		gfar_write(&regs->gaddr1, 0xffffffff);
3144 		gfar_write(&regs->gaddr2, 0xffffffff);
3145 		gfar_write(&regs->gaddr3, 0xffffffff);
3146 		gfar_write(&regs->gaddr4, 0xffffffff);
3147 		gfar_write(&regs->gaddr5, 0xffffffff);
3148 		gfar_write(&regs->gaddr6, 0xffffffff);
3149 		gfar_write(&regs->gaddr7, 0xffffffff);
3150 	} else {
3151 		int em_num;
3152 		int idx;
3153 
3154 		/* zero out the hash */
3155 		gfar_write(&regs->igaddr0, 0x0);
3156 		gfar_write(&regs->igaddr1, 0x0);
3157 		gfar_write(&regs->igaddr2, 0x0);
3158 		gfar_write(&regs->igaddr3, 0x0);
3159 		gfar_write(&regs->igaddr4, 0x0);
3160 		gfar_write(&regs->igaddr5, 0x0);
3161 		gfar_write(&regs->igaddr6, 0x0);
3162 		gfar_write(&regs->igaddr7, 0x0);
3163 		gfar_write(&regs->gaddr0, 0x0);
3164 		gfar_write(&regs->gaddr1, 0x0);
3165 		gfar_write(&regs->gaddr2, 0x0);
3166 		gfar_write(&regs->gaddr3, 0x0);
3167 		gfar_write(&regs->gaddr4, 0x0);
3168 		gfar_write(&regs->gaddr5, 0x0);
3169 		gfar_write(&regs->gaddr6, 0x0);
3170 		gfar_write(&regs->gaddr7, 0x0);
3171 
3172 		/* If we have extended hash tables, we need to
3173 		 * clear the exact match registers to prepare for
3174 		 * setting them
3175 		 */
3176 		if (priv->extended_hash) {
3177 			em_num = GFAR_EM_NUM + 1;
3178 			gfar_clear_exact_match(dev);
3179 			idx = 1;
3180 		} else {
3181 			idx = 0;
3182 			em_num = 0;
3183 		}
3184 
3185 		if (netdev_mc_empty(dev))
3186 			return;
3187 
3188 		/* Parse the list, and set the appropriate bits */
3189 		netdev_for_each_mc_addr(ha, dev) {
3190 			if (idx < em_num) {
3191 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3192 				idx++;
3193 			} else
3194 				gfar_set_hash_for_addr(dev, ha->addr);
3195 		}
3196 	}
3197 }
3198 
3199 
3200 /* Clears each of the exact match registers to zero, so they
3201  * don't interfere with normal reception
3202  */
3203 static void gfar_clear_exact_match(struct net_device *dev)
3204 {
3205 	int idx;
3206 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3207 
3208 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3209 		gfar_set_mac_for_addr(dev, idx, zero_arr);
3210 }
3211 
3212 /* Set the appropriate hash bit for the given addr.
3213  * The algorithm works like so:
3214  * 1) Take the Destination Address (ie the multicast address), and
3215  * do a CRC on it (little endian), and reverse the bits of the
3216  * result.
3217  * 2) Use the 8 most significant bits as a hash into a 256-entry
3218  * table.  The table is controlled through 8 32-bit registers:
3219  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3220  * entry 255.  This means that the 3 most significant bits of the
3221  * hash index select which gaddr register to use, and the other 5 bits
3222  * indicate which bit (assuming an IBM numbering scheme, which
3223  * for PowerPC (tm) is usually the case) in the register holds
3224  * the entry.
3225  */
3226 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3227 {
3228 	u32 tempval;
3229 	struct gfar_private *priv = netdev_priv(dev);
3230 	u32 result = ether_crc(ETH_ALEN, addr);
3231 	int width = priv->hash_width;
3232 	u8 whichbit = (result >> (32 - width)) & 0x1f;
3233 	u8 whichreg = result >> (32 - width + 5);
3234 	u32 value = (1 << (31-whichbit));
3235 
3236 	tempval = gfar_read(priv->hash_regs[whichreg]);
3237 	tempval |= value;
3238 	gfar_write(priv->hash_regs[whichreg], tempval);
3239 }
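
/* Worked example (illustrative, assuming the non-extended case where
 * hash_width is 8): for a CRC result of 0xC5xxxxxx the top 8 bits are
 * 0xC5 = 0b11000101, so whichreg = 0b110 = 6 and whichbit = 0b00101 = 5,
 * and the code sets bit 5 (counting from the MSB) of hash register 6,
 * i.e. tempval |= 1 << 26.
 */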
3240 
3241 
3242 /* There are multiple MAC Address register pairs on some controllers.
3243  * This function sets the num'th pair to the given address.
3244  */
3245 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3246 				  const u8 *addr)
3247 {
3248 	struct gfar_private *priv = netdev_priv(dev);
3249 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3250 	u32 tempval;
3251 	u32 __iomem *macptr = &regs->macstnaddr1;
3252 
3253 	macptr += num*2;
3254 
3255 	/* For a station address of 0x12345678ABCD in transmission
3256 	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3257 	 * MACnADDR2 is set to 0x34120000.
3258 	 */
3259 	tempval = (addr[5] << 24) | (addr[4] << 16) |
3260 		  (addr[3] << 8)  |  addr[2];
3261 
3262 	gfar_write(macptr, tempval);
3263 
3264 	tempval = (addr[1] << 24) | (addr[0] << 16);
3265 
3266 	gfar_write(macptr+1, tempval);
3267 }
3268 
3269 /* GFAR error interrupt handler */
3270 static irqreturn_t gfar_error(int irq, void *grp_id)
3271 {
3272 	struct gfar_priv_grp *gfargrp = grp_id;
3273 	struct gfar __iomem *regs = gfargrp->regs;
3274 	struct gfar_private *priv = gfargrp->priv;
3275 	struct net_device *dev = priv->ndev;
3276 
3277 	/* Save ievent for future reference */
3278 	u32 events = gfar_read(&regs->ievent);
3279 
3280 	/* Clear IEVENT */
3281 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3282 
3283 	/* Magic Packet is not an error. */
3284 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3285 	    (events & IEVENT_MAG))
3286 		events &= ~IEVENT_MAG;
3287 
3288 	/* Log the error details if Rx/Tx error messaging is enabled */
3289 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3290 		netdev_dbg(dev,
3291 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3292 			   events, gfar_read(&regs->imask));
3293 
3294 	/* Update the error counters */
3295 	if (events & IEVENT_TXE) {
3296 		dev->stats.tx_errors++;
3297 
3298 		if (events & IEVENT_LC)
3299 			dev->stats.tx_window_errors++;
3300 		if (events & IEVENT_CRL)
3301 			dev->stats.tx_aborted_errors++;
3302 		if (events & IEVENT_XFUN) {
3303 			unsigned long flags;
3304 
3305 			netif_dbg(priv, tx_err, dev,
3306 				  "TX FIFO underrun, packet dropped\n");
3307 			dev->stats.tx_dropped++;
3308 			atomic64_inc(&priv->extra_stats.tx_underrun);
3309 
3310 			local_irq_save(flags);
3311 			lock_tx_qs(priv);
3312 
3313 			/* Reactivate the Tx Queues */
3314 			gfar_write(&regs->tstat, gfargrp->tstat);
3315 
3316 			unlock_tx_qs(priv);
3317 			local_irq_restore(flags);
3318 		}
3319 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3320 	}
3321 	if (events & IEVENT_BSY) {
3322 		dev->stats.rx_errors++;
3323 		atomic64_inc(&priv->extra_stats.rx_bsy);
3324 
3325 		gfar_receive(irq, grp_id);
3326 
3327 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3328 			  gfar_read(&regs->rstat));
3329 	}
3330 	if (events & IEVENT_BABR) {
3331 		dev->stats.rx_errors++;
3332 		atomic64_inc(&priv->extra_stats.rx_babr);
3333 
3334 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3335 	}
3336 	if (events & IEVENT_EBERR) {
3337 		atomic64_inc(&priv->extra_stats.eberr);
3338 		netif_dbg(priv, rx_err, dev, "bus error\n");
3339 	}
3340 	if (events & IEVENT_RXC)
3341 		netif_dbg(priv, rx_status, dev, "control frame\n");
3342 
3343 	if (events & IEVENT_BABT) {
3344 		atomic64_inc(&priv->extra_stats.tx_babt);
3345 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3346 	}
3347 	return IRQ_HANDLED;
3348 }
3349 
3350 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3351 {
3352 	struct phy_device *phydev = priv->phydev;
3353 	u32 val = 0;
3354 
3355 	if (!phydev->duplex)
3356 		return val;
3357 
3358 	if (!priv->pause_aneg_en) {
3359 		if (priv->tx_pause_en)
3360 			val |= MACCFG1_TX_FLOW;
3361 		if (priv->rx_pause_en)
3362 			val |= MACCFG1_RX_FLOW;
3363 	} else {
3364 		u16 lcl_adv, rmt_adv;
3365 		u8 flowctrl;
3366 		/* get link partner capabilities */
3367 		rmt_adv = 0;
3368 		if (phydev->pause)
3369 			rmt_adv = LPA_PAUSE_CAP;
3370 		if (phydev->asym_pause)
3371 			rmt_adv |= LPA_PAUSE_ASYM;
3372 
3373 		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3374 
3375 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3376 		if (flowctrl & FLOW_CTRL_TX)
3377 			val |= MACCFG1_TX_FLOW;
3378 		if (flowctrl & FLOW_CTRL_RX)
3379 			val |= MACCFG1_RX_FLOW;
3380 	}
3381 
3382 	return val;
3383 }
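
/* Illustrative example of the auto-negotiated branch above, assuming the
 * standard flow-control resolution performed by mii_resolve_flowctrl_fdx():
 * when both the local advertisement and the link partner advertise
 * symmetric pause, the helper reports FLOW_CTRL_TX | FLOW_CTRL_RX and both
 * MACCFG1_TX_FLOW and MACCFG1_RX_FLOW end up set.
 */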
3384 
3385 static noinline void gfar_update_link_state(struct gfar_private *priv)
3386 {
3387 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3388 	struct phy_device *phydev = priv->phydev;
3389 
3390 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3391 		return;
3392 
3393 	if (phydev->link) {
3394 		u32 tempval1 = gfar_read(&regs->maccfg1);
3395 		u32 tempval = gfar_read(&regs->maccfg2);
3396 		u32 ecntrl = gfar_read(&regs->ecntrl);
3397 
3398 		if (phydev->duplex != priv->oldduplex) {
3399 			if (!(phydev->duplex))
3400 				tempval &= ~(MACCFG2_FULL_DUPLEX);
3401 			else
3402 				tempval |= MACCFG2_FULL_DUPLEX;
3403 
3404 			priv->oldduplex = phydev->duplex;
3405 		}
3406 
3407 		if (phydev->speed != priv->oldspeed) {
3408 			switch (phydev->speed) {
3409 			case 1000:
3410 				tempval =
3411 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3412 
3413 				ecntrl &= ~(ECNTRL_R100);
3414 				break;
3415 			case 100:
3416 			case 10:
3417 				tempval =
3418 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3419 
3420 				/* Reduced mode distinguishes
3421 				 * between 10 and 100
3422 				 */
3423 				if (phydev->speed == SPEED_100)
3424 					ecntrl |= ECNTRL_R100;
3425 				else
3426 					ecntrl &= ~(ECNTRL_R100);
3427 				break;
3428 			default:
3429 				netif_warn(priv, link, priv->ndev,
3430 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
3431 					   phydev->speed);
3432 				break;
3433 			}
3434 
3435 			priv->oldspeed = phydev->speed;
3436 		}
3437 
3438 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3439 		tempval1 |= gfar_get_flowctrl_cfg(priv);
3440 
3441 		gfar_write(&regs->maccfg1, tempval1);
3442 		gfar_write(&regs->maccfg2, tempval);
3443 		gfar_write(&regs->ecntrl, ecntrl);
3444 
3445 		if (!priv->oldlink)
3446 			priv->oldlink = 1;
3447 
3448 	} else if (priv->oldlink) {
3449 		priv->oldlink = 0;
3450 		priv->oldspeed = 0;
3451 		priv->oldduplex = -1;
3452 	}
3453 
3454 	if (netif_msg_link(priv))
3455 		phy_print_status(phydev);
3456 }
3457 
3458 static struct of_device_id gfar_match[] =
3459 {
3460 	{
3461 		.type = "network",
3462 		.compatible = "gianfar",
3463 	},
3464 	{
3465 		.compatible = "fsl,etsec2",
3466 	},
3467 	{},
3468 };
3469 MODULE_DEVICE_TABLE(of, gfar_match);
3470 
3471 /* Structure for a device driver */
3472 static struct platform_driver gfar_driver = {
3473 	.driver = {
3474 		.name = "fsl-gianfar",
3475 		.owner = THIS_MODULE,
3476 		.pm = GFAR_PM_OPS,
3477 		.of_match_table = gfar_match,
3478 	},
3479 	.probe = gfar_probe,
3480 	.remove = gfar_remove,
3481 };
3482 
3483 module_platform_driver(gfar_driver);
3484