1 /* drivers/net/ethernet/freescale/gianfar.c
2  *
3  * Gianfar Ethernet Driver
4  * This driver is designed for the non-CPM ethernet controllers
5  * on the 85xx and 83xx family of integrated processors
6  * Based on 8260_io/fcc_enet.c
7  *
8  * Author: Andy Fleming
9  * Maintainer: Kumar Gala
10  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11  *
12  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13  * Copyright 2007 MontaVista Software, Inc.
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  *  Gianfar:  AKA Lambda Draconis, "Dragon"
21  *  RA 11 31 24.2
22  *  Dec +69 19 52
23  *  V 3.84
24  *  B-V +1.62
25  *
26  *  Theory of operation
27  *
28  *  The driver is initialized through of_device. Configuration information
29  *  is therefore conveyed through an OF-style device tree.
30  *
31  *  The Gianfar Ethernet Controller uses a ring of buffer
32  *  descriptors.  The beginning is indicated by a register
33  *  pointing to the physical address of the start of the ring.
34  *  The end is determined by a "wrap" bit being set in the
35  *  last descriptor of the ring.
36  *
37  *  When a packet is received, the RXF bit in the
38  *  IEVENT register is set, triggering an interrupt when the
39  *  corresponding bit in the IMASK register is also set (if
40  *  interrupt coalescing is active, then the interrupt may not
41  *  happen immediately, but will wait until either a set number
42  *  of frames or amount of time have passed).  In NAPI, the
43  *  interrupt handler will signal there is work to be done, and
44  *  exit. This method will start at the last known empty
45  *  descriptor, and process every subsequent descriptor until there
46  *  are none left with data (NAPI will stop after a set number of
47  *  packets to give time to other tasks, but will eventually
48  *  process all the packets).  The data arrives inside a
49  *  pre-allocated skb, and so after the skb is passed up to the
50  *  stack, a new skb must be allocated, and the address field in
51  *  the buffer descriptor must be updated to indicate this new
52  *  skb.
53  *
54  *  When the kernel requests that a packet be transmitted, the
55  *  driver starts where it left off last time, and points the
56  *  descriptor at the buffer which was passed in.  The driver
57  *  then informs the DMA engine that there are packets ready to
58  *  be transmitted.  Once the controller is finished transmitting
59  *  the packet, an interrupt may be triggered (under the same
60  *  conditions as for reception, but depending on the TXF bit).
61  *  The driver then cleans up the buffer.
62  */
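/* For illustration only (not part of the driver): a simplified sketch of the
 * receive descriptor walk described above.  It reuses this file's rxbd8
 * fields and the RXBD_EMPTY/RXBD_WRAP flags, but omits locking, FCB parsing,
 * DMA unmapping and error handling:
 *
 *	while (budget-- && !(bdp->status & RXBD_EMPTY)) {
 *		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 *		netif_receive_skb(skb);		// hand the frame to the stack
 *		skb = gfar_new_skb(ndev);	// refill with a fresh buffer
 *		rx_queue->rx_skbuff[rx_queue->skb_currx] = skb;
 *		gfar_new_rxbdp(rx_queue, bdp, skb);	// re-arm the descriptor
 *		bdp = (bdp->status & RXBD_WRAP) ?	// wrap bit marks ring end
 *			rx_queue->rx_bd_base : bdp + 1;
 *		rx_queue->skb_currx = (rx_queue->skb_currx + 1) %
 *				      rx_queue->rx_ring_size;
 *	}
 */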
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 #define DEBUG
66 
67 #include <linux/kernel.h>
68 #include <linux/string.h>
69 #include <linux/errno.h>
70 #include <linux/unistd.h>
71 #include <linux/slab.h>
72 #include <linux/interrupt.h>
73 #include <linux/delay.h>
74 #include <linux/netdevice.h>
75 #include <linux/etherdevice.h>
76 #include <linux/skbuff.h>
77 #include <linux/if_vlan.h>
78 #include <linux/spinlock.h>
79 #include <linux/mm.h>
80 #include <linux/of_address.h>
81 #include <linux/of_irq.h>
82 #include <linux/of_mdio.h>
83 #include <linux/of_platform.h>
84 #include <linux/ip.h>
85 #include <linux/tcp.h>
86 #include <linux/udp.h>
87 #include <linux/in.h>
88 #include <linux/net_tstamp.h>
89 
90 #include <asm/io.h>
91 #ifdef CONFIG_PPC
92 #include <asm/reg.h>
93 #include <asm/mpc85xx.h>
94 #endif
95 #include <asm/irq.h>
96 #include <asm/uaccess.h>
97 #include <linux/module.h>
98 #include <linux/dma-mapping.h>
99 #include <linux/crc32.h>
100 #include <linux/mii.h>
101 #include <linux/phy.h>
102 #include <linux/phy_fixed.h>
103 #include <linux/of.h>
104 #include <linux/of_net.h>
105 #include <linux/of_address.h>
106 #include <linux/of_irq.h>
107 
108 #include "gianfar.h"
109 
110 #define TX_TIMEOUT      (1*HZ)
111 
112 const char gfar_driver_version[] = "1.3";
113 
114 static int gfar_enet_open(struct net_device *dev);
115 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
116 static void gfar_reset_task(struct work_struct *work);
117 static void gfar_timeout(struct net_device *dev);
118 static int gfar_close(struct net_device *dev);
119 struct sk_buff *gfar_new_skb(struct net_device *dev);
120 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
121 			   struct sk_buff *skb);
122 static int gfar_set_mac_address(struct net_device *dev);
123 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
124 static irqreturn_t gfar_error(int irq, void *dev_id);
125 static irqreturn_t gfar_transmit(int irq, void *dev_id);
126 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
127 static void adjust_link(struct net_device *dev);
128 static noinline void gfar_update_link_state(struct gfar_private *priv);
129 static int init_phy(struct net_device *dev);
130 static int gfar_probe(struct platform_device *ofdev);
131 static int gfar_remove(struct platform_device *ofdev);
132 static void free_skb_resources(struct gfar_private *priv);
133 static void gfar_set_multi(struct net_device *dev);
134 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
135 static void gfar_configure_serdes(struct net_device *dev);
136 static int gfar_poll_rx(struct napi_struct *napi, int budget);
137 static int gfar_poll_tx(struct napi_struct *napi, int budget);
138 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
139 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
140 #ifdef CONFIG_NET_POLL_CONTROLLER
141 static void gfar_netpoll(struct net_device *dev);
142 #endif
143 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
144 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
145 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
146 			       int amount_pull, struct napi_struct *napi);
147 static void gfar_halt_nodisable(struct gfar_private *priv);
148 static void gfar_clear_exact_match(struct net_device *dev);
149 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
150 				  const u8 *addr);
151 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
152 
153 MODULE_AUTHOR("Freescale Semiconductor, Inc");
154 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
155 MODULE_LICENSE("GPL");
156 
157 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
158 			    dma_addr_t buf)
159 {
160 	u32 lstatus;
161 
162 	bdp->bufPtr = buf;
163 
164 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
165 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
166 		lstatus |= BD_LFLAG(RXBD_WRAP);
167 
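	/* The eieio() below orders the bufPtr store above against the lstatus
	 * store that follows, so the controller never sees a descriptor
	 * flagged empty before its buffer pointer is valid.
	 */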
168 	eieio();
169 
170 	bdp->lstatus = lstatus;
171 }
172 
173 static int gfar_init_bds(struct net_device *ndev)
174 {
175 	struct gfar_private *priv = netdev_priv(ndev);
176 	struct gfar_priv_tx_q *tx_queue = NULL;
177 	struct gfar_priv_rx_q *rx_queue = NULL;
178 	struct txbd8 *txbdp;
179 	struct rxbd8 *rxbdp;
180 	int i, j;
181 
182 	for (i = 0; i < priv->num_tx_queues; i++) {
183 		tx_queue = priv->tx_queue[i];
184 		/* Initialize some variables in our dev structure */
185 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
186 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
187 		tx_queue->cur_tx = tx_queue->tx_bd_base;
188 		tx_queue->skb_curtx = 0;
189 		tx_queue->skb_dirtytx = 0;
190 
191 		/* Initialize Transmit Descriptor Ring */
192 		txbdp = tx_queue->tx_bd_base;
193 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
194 			txbdp->lstatus = 0;
195 			txbdp->bufPtr = 0;
196 			txbdp++;
197 		}
198 
199 		/* Set the last descriptor in the ring to indicate wrap */
200 		txbdp--;
201 		txbdp->status |= TXBD_WRAP;
202 	}
203 
204 	for (i = 0; i < priv->num_rx_queues; i++) {
205 		rx_queue = priv->rx_queue[i];
206 		rx_queue->cur_rx = rx_queue->rx_bd_base;
207 		rx_queue->skb_currx = 0;
208 		rxbdp = rx_queue->rx_bd_base;
209 
210 		for (j = 0; j < rx_queue->rx_ring_size; j++) {
211 			struct sk_buff *skb = rx_queue->rx_skbuff[j];
212 
213 			if (skb) {
214 				gfar_init_rxbdp(rx_queue, rxbdp,
215 						rxbdp->bufPtr);
216 			} else {
217 				skb = gfar_new_skb(ndev);
218 				if (!skb) {
219 					netdev_err(ndev, "Can't allocate RX buffers\n");
220 					return -ENOMEM;
221 				}
222 				rx_queue->rx_skbuff[j] = skb;
223 
224 				gfar_new_rxbdp(rx_queue, rxbdp, skb);
225 			}
226 
227 			rxbdp++;
228 		}
229 
230 	}
231 
232 	return 0;
233 }
234 
235 static int gfar_alloc_skb_resources(struct net_device *ndev)
236 {
237 	void *vaddr;
238 	dma_addr_t addr;
239 	int i, j, k;
240 	struct gfar_private *priv = netdev_priv(ndev);
241 	struct device *dev = priv->dev;
242 	struct gfar_priv_tx_q *tx_queue = NULL;
243 	struct gfar_priv_rx_q *rx_queue = NULL;
244 
245 	priv->total_tx_ring_size = 0;
246 	for (i = 0; i < priv->num_tx_queues; i++)
247 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
248 
249 	priv->total_rx_ring_size = 0;
250 	for (i = 0; i < priv->num_rx_queues; i++)
251 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
252 
253 	/* Allocate memory for the buffer descriptors */
254 	vaddr = dma_alloc_coherent(dev,
255 				   (priv->total_tx_ring_size *
256 				    sizeof(struct txbd8)) +
257 				   (priv->total_rx_ring_size *
258 				    sizeof(struct rxbd8)),
259 				   &addr, GFP_KERNEL);
260 	if (!vaddr)
261 		return -ENOMEM;
262 
263 	for (i = 0; i < priv->num_tx_queues; i++) {
264 		tx_queue = priv->tx_queue[i];
265 		tx_queue->tx_bd_base = vaddr;
266 		tx_queue->tx_bd_dma_base = addr;
267 		tx_queue->dev = ndev;
268 		/* enet DMA only understands physical addresses */
269 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
270 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
271 	}
272 
273 	/* Start the rx descriptor ring where the tx ring leaves off */
274 	for (i = 0; i < priv->num_rx_queues; i++) {
275 		rx_queue = priv->rx_queue[i];
276 		rx_queue->rx_bd_base = vaddr;
277 		rx_queue->rx_bd_dma_base = addr;
278 		rx_queue->dev = ndev;
279 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
280 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
281 	}
282 
283 	/* Setup the skbuff rings */
284 	for (i = 0; i < priv->num_tx_queues; i++) {
285 		tx_queue = priv->tx_queue[i];
286 		tx_queue->tx_skbuff =
287 			kmalloc_array(tx_queue->tx_ring_size,
288 				      sizeof(*tx_queue->tx_skbuff),
289 				      GFP_KERNEL);
290 		if (!tx_queue->tx_skbuff)
291 			goto cleanup;
292 
293 		for (k = 0; k < tx_queue->tx_ring_size; k++)
294 			tx_queue->tx_skbuff[k] = NULL;
295 	}
296 
297 	for (i = 0; i < priv->num_rx_queues; i++) {
298 		rx_queue = priv->rx_queue[i];
299 		rx_queue->rx_skbuff =
300 			kmalloc_array(rx_queue->rx_ring_size,
301 				      sizeof(*rx_queue->rx_skbuff),
302 				      GFP_KERNEL);
303 		if (!rx_queue->rx_skbuff)
304 			goto cleanup;
305 
306 		for (j = 0; j < rx_queue->rx_ring_size; j++)
307 			rx_queue->rx_skbuff[j] = NULL;
308 	}
309 
310 	if (gfar_init_bds(ndev))
311 		goto cleanup;
312 
313 	return 0;
314 
315 cleanup:
316 	free_skb_resources(priv);
317 	return -ENOMEM;
318 }
319 
320 static void gfar_init_tx_rx_base(struct gfar_private *priv)
321 {
322 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
323 	u32 __iomem *baddr;
324 	int i;
325 
326 	baddr = &regs->tbase0;
327 	for (i = 0; i < priv->num_tx_queues; i++) {
328 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
329 		baddr += 2;
330 	}
331 
332 	baddr = &regs->rbase0;
333 	for (i = 0; i < priv->num_rx_queues; i++) {
334 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
335 		baddr += 2;
336 	}
337 }
338 
339 static void gfar_rx_buff_size_config(struct gfar_private *priv)
340 {
341 	int frame_size = priv->ndev->mtu + ETH_HLEN;
342 
343 	/* set this when rx hw offload (TOE) functions are being used */
344 	priv->uses_rxfcb = 0;
345 
346 	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
347 		priv->uses_rxfcb = 1;
348 
349 	if (priv->hwts_rx_en)
350 		priv->uses_rxfcb = 1;
351 
352 	if (priv->uses_rxfcb)
353 		frame_size += GMAC_FCB_LEN;
354 
355 	frame_size += priv->padding;
356 
357 	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
358 		     INCREMENTAL_BUFFER_SIZE;
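	/* Worked example (assuming INCREMENTAL_BUFFER_SIZE is 512, no FCB and
	 * no padding): a 1500-byte MTU gives frame_size = 1514, which the
	 * expression above rounds up to the next larger multiple of 512,
	 * i.e. (1514 & ~511) + 512 = 1536.
	 */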
359 
360 	priv->rx_buffer_size = frame_size;
361 }
362 
363 static void gfar_mac_rx_config(struct gfar_private *priv)
364 {
365 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
366 	u32 rctrl = 0;
367 
368 	if (priv->rx_filer_enable) {
369 		rctrl |= RCTRL_FILREN;
370 		/* Program the RIR0 reg with the required distribution */
371 		if (priv->poll_mode == GFAR_SQ_POLLING)
372 			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
373 		else /* GFAR_MQ_POLLING */
374 			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
375 	}
376 
377 	/* Restore PROMISC mode */
378 	if (priv->ndev->flags & IFF_PROMISC)
379 		rctrl |= RCTRL_PROM;
380 
381 	if (priv->ndev->features & NETIF_F_RXCSUM)
382 		rctrl |= RCTRL_CHECKSUMMING;
383 
384 	if (priv->extended_hash)
385 		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
386 
387 	if (priv->padding) {
388 		rctrl &= ~RCTRL_PAL_MASK;
389 		rctrl |= RCTRL_PADDING(priv->padding);
390 	}
391 
392 	/* Enable HW time stamping if requested from user space */
393 	if (priv->hwts_rx_en)
394 		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
395 
396 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
397 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
398 
399 	/* Init rctrl based on our settings */
400 	gfar_write(&regs->rctrl, rctrl);
401 }
402 
403 static void gfar_mac_tx_config(struct gfar_private *priv)
404 {
405 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
406 	u32 tctrl = 0;
407 
408 	if (priv->ndev->features & NETIF_F_IP_CSUM)
409 		tctrl |= TCTRL_INIT_CSUM;
410 
411 	if (priv->prio_sched_en)
412 		tctrl |= TCTRL_TXSCHED_PRIO;
413 	else {
414 		tctrl |= TCTRL_TXSCHED_WRRS;
415 		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
416 		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
417 	}
418 
419 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
420 		tctrl |= TCTRL_VLINS;
421 
422 	gfar_write(&regs->tctrl, tctrl);
423 }
424 
425 static void gfar_configure_coalescing(struct gfar_private *priv,
426 			       unsigned long tx_mask, unsigned long rx_mask)
427 {
428 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
429 	u32 __iomem *baddr;
430 
431 	if (priv->mode == MQ_MG_MODE) {
432 		int i = 0;
433 
434 		baddr = &regs->txic0;
435 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
436 			gfar_write(baddr + i, 0);
437 			if (likely(priv->tx_queue[i]->txcoalescing))
438 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
439 		}
440 
441 		baddr = &regs->rxic0;
442 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
443 			gfar_write(baddr + i, 0);
444 			if (likely(priv->rx_queue[i]->rxcoalescing))
445 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
446 		}
447 	} else {
448 		/* Backward compatible case -- even if we enable
449 		 * multiple queues, there's only a single reg to program
450 		 */
451 		gfar_write(&regs->txic, 0);
452 		if (likely(priv->tx_queue[0]->txcoalescing))
453 			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
454 
455 		gfar_write(&regs->rxic, 0);
456 		if (unlikely(priv->rx_queue[0]->rxcoalescing))
457 			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
458 	}
459 }
460 
461 void gfar_configure_coalescing_all(struct gfar_private *priv)
462 {
463 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
464 }
465 
466 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
467 {
468 	struct gfar_private *priv = netdev_priv(dev);
469 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
470 	unsigned long tx_packets = 0, tx_bytes = 0;
471 	int i;
472 
473 	for (i = 0; i < priv->num_rx_queues; i++) {
474 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
475 		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
476 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
477 	}
478 
479 	dev->stats.rx_packets = rx_packets;
480 	dev->stats.rx_bytes   = rx_bytes;
481 	dev->stats.rx_dropped = rx_dropped;
482 
483 	for (i = 0; i < priv->num_tx_queues; i++) {
484 		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
485 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
486 	}
487 
488 	dev->stats.tx_bytes   = tx_bytes;
489 	dev->stats.tx_packets = tx_packets;
490 
491 	return &dev->stats;
492 }
493 
494 static const struct net_device_ops gfar_netdev_ops = {
495 	.ndo_open = gfar_enet_open,
496 	.ndo_start_xmit = gfar_start_xmit,
497 	.ndo_stop = gfar_close,
498 	.ndo_change_mtu = gfar_change_mtu,
499 	.ndo_set_features = gfar_set_features,
500 	.ndo_set_rx_mode = gfar_set_multi,
501 	.ndo_tx_timeout = gfar_timeout,
502 	.ndo_do_ioctl = gfar_ioctl,
503 	.ndo_get_stats = gfar_get_stats,
504 	.ndo_set_mac_address = eth_mac_addr,
505 	.ndo_validate_addr = eth_validate_addr,
506 #ifdef CONFIG_NET_POLL_CONTROLLER
507 	.ndo_poll_controller = gfar_netpoll,
508 #endif
509 };
510 
511 static void gfar_ints_disable(struct gfar_private *priv)
512 {
513 	int i;
514 	for (i = 0; i < priv->num_grps; i++) {
515 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
516 		/* Clear IEVENT */
517 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
518 
519 		/* Initialize IMASK */
520 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
521 	}
522 }
523 
524 static void gfar_ints_enable(struct gfar_private *priv)
525 {
526 	int i;
527 	for (i = 0; i < priv->num_grps; i++) {
528 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
529 		/* Unmask the interrupts we look for */
530 		gfar_write(&regs->imask, IMASK_DEFAULT);
531 	}
532 }
533 
534 void lock_tx_qs(struct gfar_private *priv)
535 {
536 	int i;
537 
538 	for (i = 0; i < priv->num_tx_queues; i++)
539 		spin_lock(&priv->tx_queue[i]->txlock);
540 }
541 
542 void unlock_tx_qs(struct gfar_private *priv)
543 {
544 	int i;
545 
546 	for (i = 0; i < priv->num_tx_queues; i++)
547 		spin_unlock(&priv->tx_queue[i]->txlock);
548 }
549 
550 static int gfar_alloc_tx_queues(struct gfar_private *priv)
551 {
552 	int i;
553 
554 	for (i = 0; i < priv->num_tx_queues; i++) {
555 		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
556 					    GFP_KERNEL);
557 		if (!priv->tx_queue[i])
558 			return -ENOMEM;
559 
560 		priv->tx_queue[i]->tx_skbuff = NULL;
561 		priv->tx_queue[i]->qindex = i;
562 		priv->tx_queue[i]->dev = priv->ndev;
563 		spin_lock_init(&(priv->tx_queue[i]->txlock));
564 	}
565 	return 0;
566 }
567 
568 static int gfar_alloc_rx_queues(struct gfar_private *priv)
569 {
570 	int i;
571 
572 	for (i = 0; i < priv->num_rx_queues; i++) {
573 		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
574 					    GFP_KERNEL);
575 		if (!priv->rx_queue[i])
576 			return -ENOMEM;
577 
578 		priv->rx_queue[i]->rx_skbuff = NULL;
579 		priv->rx_queue[i]->qindex = i;
580 		priv->rx_queue[i]->dev = priv->ndev;
581 	}
582 	return 0;
583 }
584 
585 static void gfar_free_tx_queues(struct gfar_private *priv)
586 {
587 	int i;
588 
589 	for (i = 0; i < priv->num_tx_queues; i++)
590 		kfree(priv->tx_queue[i]);
591 }
592 
593 static void gfar_free_rx_queues(struct gfar_private *priv)
594 {
595 	int i;
596 
597 	for (i = 0; i < priv->num_rx_queues; i++)
598 		kfree(priv->rx_queue[i]);
599 }
600 
601 static void unmap_group_regs(struct gfar_private *priv)
602 {
603 	int i;
604 
605 	for (i = 0; i < MAXGROUPS; i++)
606 		if (priv->gfargrp[i].regs)
607 			iounmap(priv->gfargrp[i].regs);
608 }
609 
610 static void free_gfar_dev(struct gfar_private *priv)
611 {
612 	int i, j;
613 
614 	for (i = 0; i < priv->num_grps; i++)
615 		for (j = 0; j < GFAR_NUM_IRQS; j++) {
616 			kfree(priv->gfargrp[i].irqinfo[j]);
617 			priv->gfargrp[i].irqinfo[j] = NULL;
618 		}
619 
620 	free_netdev(priv->ndev);
621 }
622 
623 static void disable_napi(struct gfar_private *priv)
624 {
625 	int i;
626 
627 	for (i = 0; i < priv->num_grps; i++) {
628 		napi_disable(&priv->gfargrp[i].napi_rx);
629 		napi_disable(&priv->gfargrp[i].napi_tx);
630 	}
631 }
632 
633 static void enable_napi(struct gfar_private *priv)
634 {
635 	int i;
636 
637 	for (i = 0; i < priv->num_grps; i++) {
638 		napi_enable(&priv->gfargrp[i].napi_rx);
639 		napi_enable(&priv->gfargrp[i].napi_tx);
640 	}
641 }
642 
643 static int gfar_parse_group(struct device_node *np,
644 			    struct gfar_private *priv, const char *model)
645 {
646 	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
647 	int i;
648 
649 	for (i = 0; i < GFAR_NUM_IRQS; i++) {
650 		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
651 					  GFP_KERNEL);
652 		if (!grp->irqinfo[i])
653 			return -ENOMEM;
654 	}
655 
656 	grp->regs = of_iomap(np, 0);
657 	if (!grp->regs)
658 		return -ENOMEM;
659 
660 	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
661 
662 	/* If we aren't the FEC we have multiple interrupts */
663 	if (model && strcasecmp(model, "FEC")) {
664 		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
665 		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
666 		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
667 		    gfar_irq(grp, RX)->irq == NO_IRQ ||
668 		    gfar_irq(grp, ER)->irq == NO_IRQ)
669 			return -EINVAL;
670 	}
671 
672 	grp->priv = priv;
673 	spin_lock_init(&grp->grplock);
674 	if (priv->mode == MQ_MG_MODE) {
675 		u32 *rxq_mask, *txq_mask;
676 		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
677 		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
678 
679 		if (priv->poll_mode == GFAR_SQ_POLLING) {
680 			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
681 			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
682 			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
683 		} else { /* GFAR_MQ_POLLING */
684 			grp->rx_bit_map = rxq_mask ?
685 			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
686 			grp->tx_bit_map = txq_mask ?
687 			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
688 		}
689 	} else {
690 		grp->rx_bit_map = 0xFF;
691 		grp->tx_bit_map = 0xFF;
692 	}
693 
694 	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
695 	 * from right to left, so we need to reverse the 8 bits to get the q index
696 	 */
697 	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
698 	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
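	/* For example, a mask of 0x80 (MSB set, i.e. q0 in the hardware's
	 * numbering) becomes 0x01 after bitrev8(), so for_each_set_bit()
	 * below yields queue index 0, as intended.
	 */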
699 
700 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
701 	 * also assign queues to groups
702 	 */
703 	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
704 		if (!grp->rx_queue)
705 			grp->rx_queue = priv->rx_queue[i];
706 		grp->num_rx_queues++;
707 		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
708 		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
709 		priv->rx_queue[i]->grp = grp;
710 	}
711 
712 	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
713 		if (!grp->tx_queue)
714 			grp->tx_queue = priv->tx_queue[i];
715 		grp->num_tx_queues++;
716 		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
717 		priv->tqueue |= (TQUEUE_EN0 >> i);
718 		priv->tx_queue[i]->grp = grp;
719 	}
720 
721 	priv->num_grps++;
722 
723 	return 0;
724 }
725 
726 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
727 {
728 	const char *model;
729 	const char *ctype;
730 	const void *mac_addr;
731 	int err = 0, i;
732 	struct net_device *dev = NULL;
733 	struct gfar_private *priv = NULL;
734 	struct device_node *np = ofdev->dev.of_node;
735 	struct device_node *child = NULL;
736 	const u32 *stash;
737 	const u32 *stash_len;
738 	const u32 *stash_idx;
739 	unsigned int num_tx_qs, num_rx_qs;
740 	u32 *tx_queues, *rx_queues;
741 	unsigned short mode, poll_mode;
742 
743 	if (!np || !of_device_is_available(np))
744 		return -ENODEV;
745 
746 	if (of_device_is_compatible(np, "fsl,etsec2")) {
747 		mode = MQ_MG_MODE;
748 		poll_mode = GFAR_SQ_POLLING;
749 	} else {
750 		mode = SQ_SG_MODE;
751 		poll_mode = GFAR_SQ_POLLING;
752 	}
753 
754 	/* parse the num of HW tx and rx queues */
755 	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
756 	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
757 
758 	if (mode == SQ_SG_MODE) {
759 		num_tx_qs = 1;
760 		num_rx_qs = 1;
761 	} else { /* MQ_MG_MODE */
762 		/* get the actual number of supported groups */
763 		unsigned int num_grps = of_get_available_child_count(np);
764 
765 		if (num_grps == 0 || num_grps > MAXGROUPS) {
766 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
767 				num_grps);
768 			pr_err("Cannot do alloc_etherdev, aborting\n");
769 			return -EINVAL;
770 		}
771 
772 		if (poll_mode == GFAR_SQ_POLLING) {
773 			num_tx_qs = num_grps; /* one txq per int group */
774 			num_rx_qs = num_grps; /* one rxq per int group */
775 		} else { /* GFAR_MQ_POLLING */
776 			num_tx_qs = tx_queues ? *tx_queues : 1;
777 			num_rx_qs = rx_queues ? *rx_queues : 1;
778 		}
779 	}
780 
781 	if (num_tx_qs > MAX_TX_QS) {
782 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
783 		       num_tx_qs, MAX_TX_QS);
784 		pr_err("Cannot do alloc_etherdev, aborting\n");
785 		return -EINVAL;
786 	}
787 
788 	if (num_rx_qs > MAX_RX_QS) {
789 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
790 		       num_rx_qs, MAX_RX_QS);
791 		pr_err("Cannot do alloc_etherdev, aborting\n");
792 		return -EINVAL;
793 	}
794 
795 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
796 	dev = *pdev;
797 	if (NULL == dev)
798 		return -ENOMEM;
799 
800 	priv = netdev_priv(dev);
801 	priv->ndev = dev;
802 
803 	priv->mode = mode;
804 	priv->poll_mode = poll_mode;
805 
806 	priv->num_tx_queues = num_tx_qs;
807 	netif_set_real_num_rx_queues(dev, num_rx_qs);
808 	priv->num_rx_queues = num_rx_qs;
809 
810 	err = gfar_alloc_tx_queues(priv);
811 	if (err)
812 		goto tx_alloc_failed;
813 
814 	err = gfar_alloc_rx_queues(priv);
815 	if (err)
816 		goto rx_alloc_failed;
817 
818 	/* Init Rx queue filer rule set linked list */
819 	INIT_LIST_HEAD(&priv->rx_list.list);
820 	priv->rx_list.count = 0;
821 	mutex_init(&priv->rx_queue_access);
822 
823 	model = of_get_property(np, "model", NULL);
824 
825 	for (i = 0; i < MAXGROUPS; i++)
826 		priv->gfargrp[i].regs = NULL;
827 
828 	/* Parse and initialize group specific information */
829 	if (priv->mode == MQ_MG_MODE) {
830 		for_each_child_of_node(np, child) {
831 			err = gfar_parse_group(child, priv, model);
832 			if (err)
833 				goto err_grp_init;
834 		}
835 	} else { /* SQ_SG_MODE */
836 		err = gfar_parse_group(np, priv, model);
837 		if (err)
838 			goto err_grp_init;
839 	}
840 
841 	stash = of_get_property(np, "bd-stash", NULL);
842 
843 	if (stash) {
844 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
845 		priv->bd_stash_en = 1;
846 	}
847 
848 	stash_len = of_get_property(np, "rx-stash-len", NULL);
849 
850 	if (stash_len)
851 		priv->rx_stash_size = *stash_len;
852 
853 	stash_idx = of_get_property(np, "rx-stash-idx", NULL);
854 
855 	if (stash_idx)
856 		priv->rx_stash_index = *stash_idx;
857 
858 	if (stash_len || stash_idx)
859 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
860 
861 	mac_addr = of_get_mac_address(np);
862 
863 	if (mac_addr)
864 		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
865 
866 	if (model && !strcasecmp(model, "TSEC"))
867 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
868 				     FSL_GIANFAR_DEV_HAS_COALESCE |
869 				     FSL_GIANFAR_DEV_HAS_RMON |
870 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
871 
872 	if (model && !strcasecmp(model, "eTSEC"))
873 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
874 				     FSL_GIANFAR_DEV_HAS_COALESCE |
875 				     FSL_GIANFAR_DEV_HAS_RMON |
876 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
877 				     FSL_GIANFAR_DEV_HAS_CSUM |
878 				     FSL_GIANFAR_DEV_HAS_VLAN |
879 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
880 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
881 				     FSL_GIANFAR_DEV_HAS_TIMER;
882 
883 	ctype = of_get_property(np, "phy-connection-type", NULL);
884 
885 	/* We only care about rgmii-id.  The rest are autodetected */
886 	if (ctype && !strcmp(ctype, "rgmii-id"))
887 		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
888 	else
889 		priv->interface = PHY_INTERFACE_MODE_MII;
890 
891 	if (of_get_property(np, "fsl,magic-packet", NULL))
892 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
893 
894 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
895 
896 	/* In the case of a fixed PHY, the DT node associated
897 	 * with the PHY is the Ethernet MAC DT node.
898 	 */
899 	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
900 		err = of_phy_register_fixed_link(np);
901 		if (err)
902 			goto err_grp_init;
903 
904 		priv->phy_node = of_node_get(np);
905 	}
906 
907 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
908 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
909 
910 	return 0;
911 
912 err_grp_init:
913 	unmap_group_regs(priv);
914 rx_alloc_failed:
915 	gfar_free_rx_queues(priv);
916 tx_alloc_failed:
917 	gfar_free_tx_queues(priv);
918 	free_gfar_dev(priv);
919 	return err;
920 }
921 
922 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
923 {
924 	struct hwtstamp_config config;
925 	struct gfar_private *priv = netdev_priv(netdev);
926 
927 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
928 		return -EFAULT;
929 
930 	/* reserved for future extensions */
931 	if (config.flags)
932 		return -EINVAL;
933 
934 	switch (config.tx_type) {
935 	case HWTSTAMP_TX_OFF:
936 		priv->hwts_tx_en = 0;
937 		break;
938 	case HWTSTAMP_TX_ON:
939 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
940 			return -ERANGE;
941 		priv->hwts_tx_en = 1;
942 		break;
943 	default:
944 		return -ERANGE;
945 	}
946 
947 	switch (config.rx_filter) {
948 	case HWTSTAMP_FILTER_NONE:
949 		if (priv->hwts_rx_en) {
950 			priv->hwts_rx_en = 0;
951 			reset_gfar(netdev);
952 		}
953 		break;
954 	default:
955 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
956 			return -ERANGE;
957 		if (!priv->hwts_rx_en) {
958 			priv->hwts_rx_en = 1;
959 			reset_gfar(netdev);
960 		}
961 		config.rx_filter = HWTSTAMP_FILTER_ALL;
962 		break;
963 	}
964 
965 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
966 		-EFAULT : 0;
967 }
968 
969 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
970 {
971 	struct hwtstamp_config config;
972 	struct gfar_private *priv = netdev_priv(netdev);
973 
974 	config.flags = 0;
975 	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
976 	config.rx_filter = (priv->hwts_rx_en ?
977 			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
978 
979 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
980 		-EFAULT : 0;
981 }
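/* A minimal user-space sketch (illustrative, not part of the driver) of the
 * SIOCSHWTSTAMP request that gfar_hwtstamp_set() services; headers other
 * than <linux/net_tstamp.h> and <linux/sockios.h>, and all error handling,
 * are omitted, and "eth0"/sock_fd are placeholders:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);	// sock_fd: any AF_INET socket
 */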
982 
983 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
984 {
985 	struct gfar_private *priv = netdev_priv(dev);
986 
987 	if (!netif_running(dev))
988 		return -EINVAL;
989 
990 	if (cmd == SIOCSHWTSTAMP)
991 		return gfar_hwtstamp_set(dev, rq);
992 	if (cmd == SIOCGHWTSTAMP)
993 		return gfar_hwtstamp_get(dev, rq);
994 
995 	if (!priv->phydev)
996 		return -ENODEV;
997 
998 	return phy_mii_ioctl(priv->phydev, rq, cmd);
999 }
1000 
1001 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
1002 				   u32 class)
1003 {
1004 	u32 rqfpr = FPR_FILER_MASK;
1005 	u32 rqfcr = 0x0;
1006 
1007 	rqfar--;
1008 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
1009 	priv->ftp_rqfpr[rqfar] = rqfpr;
1010 	priv->ftp_rqfcr[rqfar] = rqfcr;
1011 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1012 
1013 	rqfar--;
1014 	rqfcr = RQFCR_CMP_NOMATCH;
1015 	priv->ftp_rqfpr[rqfar] = rqfpr;
1016 	priv->ftp_rqfcr[rqfar] = rqfcr;
1017 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1018 
1019 	rqfar--;
1020 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1021 	rqfpr = class;
1022 	priv->ftp_rqfcr[rqfar] = rqfcr;
1023 	priv->ftp_rqfpr[rqfar] = rqfpr;
1024 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1025 
1026 	rqfar--;
1027 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1028 	rqfpr = class;
1029 	priv->ftp_rqfcr[rqfar] = rqfcr;
1030 	priv->ftp_rqfpr[rqfar] = rqfpr;
1031 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1032 
1033 	return rqfar;
1034 }
1035 
1036 static void gfar_init_filer_table(struct gfar_private *priv)
1037 {
1038 	int i = 0x0;
1039 	u32 rqfar = MAX_FILER_IDX;
1040 	u32 rqfcr = 0x0;
1041 	u32 rqfpr = FPR_FILER_MASK;
1042 
1043 	/* Default rule */
1044 	rqfcr = RQFCR_CMP_MATCH;
1045 	priv->ftp_rqfcr[rqfar] = rqfcr;
1046 	priv->ftp_rqfpr[rqfar] = rqfpr;
1047 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1048 
1049 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1050 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1051 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1052 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1053 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1054 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
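	/* Each cluster_entry_per_class() call above consumes four filer
	 * entries, so after the six calls rqfar sits 24 slots below
	 * MAX_FILER_IDX.
	 */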
1055 
1056 	/* cur_filer_idx indicates the first non-masked rule */
1057 	priv->cur_filer_idx = rqfar;
1058 
1059 	/* Rest are masked rules */
1060 	rqfcr = RQFCR_CMP_NOMATCH;
1061 	for (i = 0; i < rqfar; i++) {
1062 		priv->ftp_rqfcr[i] = rqfcr;
1063 		priv->ftp_rqfpr[i] = rqfpr;
1064 		gfar_write_filer(priv, i, rqfcr, rqfpr);
1065 	}
1066 }
1067 
1068 #ifdef CONFIG_PPC
1069 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1070 {
1071 	unsigned int pvr = mfspr(SPRN_PVR);
1072 	unsigned int svr = mfspr(SPRN_SVR);
1073 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1074 	unsigned int rev = svr & 0xffff;
1075 
1076 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
1077 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1078 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1079 		priv->errata |= GFAR_ERRATA_74;
1080 
1081 	/* MPC8313 and MPC837x all rev */
1082 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
1083 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1084 		priv->errata |= GFAR_ERRATA_76;
1085 
1086 	/* MPC8313 Rev < 2.0 */
1087 	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1088 		priv->errata |= GFAR_ERRATA_12;
1089 }
1090 
1091 static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1092 {
1093 	unsigned int svr = mfspr(SPRN_SVR);
1094 
1095 	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1096 		priv->errata |= GFAR_ERRATA_12;
1097 	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1098 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
1099 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1100 }
1101 #endif
1102 
1103 static void gfar_detect_errata(struct gfar_private *priv)
1104 {
1105 	struct device *dev = &priv->ofdev->dev;
1106 
1107 	/* no plans to fix */
1108 	priv->errata |= GFAR_ERRATA_A002;
1109 
1110 #ifdef CONFIG_PPC
1111 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1112 		__gfar_detect_errata_85xx(priv);
1113 	else /* non-mpc85xx parts, i.e. e300 core based */
1114 		__gfar_detect_errata_83xx(priv);
1115 #endif
1116 
1117 	if (priv->errata)
1118 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1119 			 priv->errata);
1120 }
1121 
1122 void gfar_mac_reset(struct gfar_private *priv)
1123 {
1124 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1125 	u32 tempval;
1126 
1127 	/* Reset MAC layer */
1128 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1129 
1130 	/* We need to delay at least 3 TX clocks */
1131 	udelay(3);
1132 
1133 	/* the soft reset bit is not self-resetting, so we need to
1134 	 * clear it before resuming normal operation
1135 	 */
1136 	gfar_write(&regs->maccfg1, 0);
1137 
1138 	udelay(3);
1139 
1140 	/* Compute rx_buff_size based on config flags */
1141 	gfar_rx_buff_size_config(priv);
1142 
1143 	/* Initialize the max receive frame/buffer lengths */
1144 	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1145 	gfar_write(&regs->mrblr, priv->rx_buffer_size);
1146 
1147 	/* Initialize the Minimum Frame Length Register */
1148 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1149 
1150 	/* Initialize MACCFG2. */
1151 	tempval = MACCFG2_INIT_SETTINGS;
1152 
1153 	/* If the mtu is larger than the max size for standard
1154 	 * ethernet frames (i.e., a jumbo frame), then set maccfg2
1155 	 * to allow huge frames, and to check the length
1156 	 */
1157 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1158 	    gfar_has_errata(priv, GFAR_ERRATA_74))
1159 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1160 
1161 	gfar_write(&regs->maccfg2, tempval);
1162 
1163 	/* Clear mac addr hash registers */
1164 	gfar_write(&regs->igaddr0, 0);
1165 	gfar_write(&regs->igaddr1, 0);
1166 	gfar_write(&regs->igaddr2, 0);
1167 	gfar_write(&regs->igaddr3, 0);
1168 	gfar_write(&regs->igaddr4, 0);
1169 	gfar_write(&regs->igaddr5, 0);
1170 	gfar_write(&regs->igaddr6, 0);
1171 	gfar_write(&regs->igaddr7, 0);
1172 
1173 	gfar_write(&regs->gaddr0, 0);
1174 	gfar_write(&regs->gaddr1, 0);
1175 	gfar_write(&regs->gaddr2, 0);
1176 	gfar_write(&regs->gaddr3, 0);
1177 	gfar_write(&regs->gaddr4, 0);
1178 	gfar_write(&regs->gaddr5, 0);
1179 	gfar_write(&regs->gaddr6, 0);
1180 	gfar_write(&regs->gaddr7, 0);
1181 
1182 	if (priv->extended_hash)
1183 		gfar_clear_exact_match(priv->ndev);
1184 
1185 	gfar_mac_rx_config(priv);
1186 
1187 	gfar_mac_tx_config(priv);
1188 
1189 	gfar_set_mac_address(priv->ndev);
1190 
1191 	gfar_set_multi(priv->ndev);
1192 
1193 	/* clear ievent and imask before configuring coalescing */
1194 	gfar_ints_disable(priv);
1195 
1196 	/* Configure the coalescing support */
1197 	gfar_configure_coalescing_all(priv);
1198 }
1199 
1200 static void gfar_hw_init(struct gfar_private *priv)
1201 {
1202 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1203 	u32 attrs;
1204 
1205 	/* Stop the DMA engine now, in case it was running before
1206 	 * (The firmware could have used it, and left it running).
1207 	 */
1208 	gfar_halt(priv);
1209 
1210 	gfar_mac_reset(priv);
1211 
1212 	/* Zero out the rmon mib registers if it has them */
1213 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1214 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1215 
1216 		/* Mask off the CAM interrupts */
1217 		gfar_write(&regs->rmon.cam1, 0xffffffff);
1218 		gfar_write(&regs->rmon.cam2, 0xffffffff);
1219 	}
1220 
1221 	/* Initialize ECNTRL */
1222 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1223 
1224 	/* Set the extraction length and index */
1225 	attrs = ATTRELI_EL(priv->rx_stash_size) |
1226 		ATTRELI_EI(priv->rx_stash_index);
1227 
1228 	gfar_write(&regs->attreli, attrs);
1229 
1230 	/* Start with defaults, and add stashing
1231 	 * depending on driver parameters
1232 	 */
1233 	attrs = ATTR_INIT_SETTINGS;
1234 
1235 	if (priv->bd_stash_en)
1236 		attrs |= ATTR_BDSTASH;
1237 
1238 	if (priv->rx_stash_size != 0)
1239 		attrs |= ATTR_BUFSTASH;
1240 
1241 	gfar_write(&regs->attr, attrs);
1242 
1243 	/* FIFO configs */
1244 	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1245 	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1246 	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1247 
1248 	/* Program the interrupt steering regs, only for MG devices */
1249 	if (priv->num_grps > 1)
1250 		gfar_write_isrg(priv);
1251 }
1252 
1253 static void gfar_init_addr_hash_table(struct gfar_private *priv)
1254 {
1255 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1256 
1257 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1258 		priv->extended_hash = 1;
1259 		priv->hash_width = 9;
1260 
1261 		priv->hash_regs[0] = &regs->igaddr0;
1262 		priv->hash_regs[1] = &regs->igaddr1;
1263 		priv->hash_regs[2] = &regs->igaddr2;
1264 		priv->hash_regs[3] = &regs->igaddr3;
1265 		priv->hash_regs[4] = &regs->igaddr4;
1266 		priv->hash_regs[5] = &regs->igaddr5;
1267 		priv->hash_regs[6] = &regs->igaddr6;
1268 		priv->hash_regs[7] = &regs->igaddr7;
1269 		priv->hash_regs[8] = &regs->gaddr0;
1270 		priv->hash_regs[9] = &regs->gaddr1;
1271 		priv->hash_regs[10] = &regs->gaddr2;
1272 		priv->hash_regs[11] = &regs->gaddr3;
1273 		priv->hash_regs[12] = &regs->gaddr4;
1274 		priv->hash_regs[13] = &regs->gaddr5;
1275 		priv->hash_regs[14] = &regs->gaddr6;
1276 		priv->hash_regs[15] = &regs->gaddr7;
1277 
1278 	} else {
1279 		priv->extended_hash = 0;
1280 		priv->hash_width = 8;
1281 
1282 		priv->hash_regs[0] = &regs->gaddr0;
1283 		priv->hash_regs[1] = &regs->gaddr1;
1284 		priv->hash_regs[2] = &regs->gaddr2;
1285 		priv->hash_regs[3] = &regs->gaddr3;
1286 		priv->hash_regs[4] = &regs->gaddr4;
1287 		priv->hash_regs[5] = &regs->gaddr5;
1288 		priv->hash_regs[6] = &regs->gaddr6;
1289 		priv->hash_regs[7] = &regs->gaddr7;
1290 	}
1291 }
1292 
1293 /* Set up the ethernet device structure, private data,
1294  * and anything else we need before we start
1295  */
1296 static int gfar_probe(struct platform_device *ofdev)
1297 {
1298 	struct net_device *dev = NULL;
1299 	struct gfar_private *priv = NULL;
1300 	int err = 0, i;
1301 
1302 	err = gfar_of_init(ofdev, &dev);
1303 
1304 	if (err)
1305 		return err;
1306 
1307 	priv = netdev_priv(dev);
1308 	priv->ndev = dev;
1309 	priv->ofdev = ofdev;
1310 	priv->dev = &ofdev->dev;
1311 	SET_NETDEV_DEV(dev, &ofdev->dev);
1312 
1313 	spin_lock_init(&priv->bflock);
1314 	INIT_WORK(&priv->reset_task, gfar_reset_task);
1315 
1316 	platform_set_drvdata(ofdev, priv);
1317 
1318 	gfar_detect_errata(priv);
1319 
1320 	/* Set the dev->base_addr to the gfar reg region */
1321 	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1322 
1323 	/* Fill in the dev structure */
1324 	dev->watchdog_timeo = TX_TIMEOUT;
1325 	dev->mtu = 1500;
1326 	dev->netdev_ops = &gfar_netdev_ops;
1327 	dev->ethtool_ops = &gfar_ethtool_ops;
1328 
1329 	/* Register for NAPI. We are registering NAPI for each interrupt group */
1330 	for (i = 0; i < priv->num_grps; i++) {
1331 		if (priv->poll_mode == GFAR_SQ_POLLING) {
1332 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1333 				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1334 			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1335 				       gfar_poll_tx_sq, 2);
1336 		} else {
1337 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1338 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
1339 			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1340 				       gfar_poll_tx, 2);
1341 		}
1342 	}
1343 
1344 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1345 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1346 				   NETIF_F_RXCSUM;
1347 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1348 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1349 	}
1350 
1351 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1352 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1353 				    NETIF_F_HW_VLAN_CTAG_RX;
1354 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1355 	}
1356 
1357 	gfar_init_addr_hash_table(priv);
1358 
1359 	/* Insert receive time stamps into padding alignment bytes */
1360 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1361 		priv->padding = 8;
1362 
1363 	if (dev->features & NETIF_F_IP_CSUM ||
1364 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1365 		dev->needed_headroom = GMAC_FCB_LEN;
1366 
1367 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1368 
1369 	/* Initializing some of the rx/tx queue level parameters */
1370 	for (i = 0; i < priv->num_tx_queues; i++) {
1371 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1372 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1373 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1374 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
1375 	}
1376 
1377 	for (i = 0; i < priv->num_rx_queues; i++) {
1378 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1379 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1380 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1381 	}
1382 
1383 	/* always enable rx filer */
1384 	priv->rx_filer_enable = 1;
1385 	/* Enable most messages by default */
1386 	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
1387 	/* use priority h/w tx queue scheduling for single queue devices */
1388 	if (priv->num_tx_queues == 1)
1389 		priv->prio_sched_en = 1;
1390 
1391 	set_bit(GFAR_DOWN, &priv->state);
1392 
1393 	gfar_hw_init(priv);
1394 
1395 	/* Carrier starts down, phylib will bring it up */
1396 	netif_carrier_off(dev);
1397 
1398 	err = register_netdev(dev);
1399 
1400 	if (err) {
1401 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
1402 		goto register_fail;
1403 	}
1404 
1405 	device_init_wakeup(&dev->dev,
1406 			   priv->device_flags &
1407 			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1408 
1409 	/* fill out IRQ number and name fields */
1410 	for (i = 0; i < priv->num_grps; i++) {
1411 		struct gfar_priv_grp *grp = &priv->gfargrp[i];
1412 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1413 			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1414 				dev->name, "_g", '0' + i, "_tx");
1415 			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1416 				dev->name, "_g", '0' + i, "_rx");
1417 			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1418 				dev->name, "_g", '0' + i, "_er");
1419 		} else
1420 			strcpy(gfar_irq(grp, TX)->name, dev->name);
1421 	}
1422 
1423 	/* Initialize the filer table */
1424 	gfar_init_filer_table(priv);
1425 
1426 	/* Print out the device info */
1427 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1428 
1429 	/* Even more device info helps when determining which kernel
1430 	 * provided which set of benchmarks.
1431 	 */
1432 	netdev_info(dev, "Running with NAPI enabled\n");
1433 	for (i = 0; i < priv->num_rx_queues; i++)
1434 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1435 			    i, priv->rx_queue[i]->rx_ring_size);
1436 	for (i = 0; i < priv->num_tx_queues; i++)
1437 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1438 			    i, priv->tx_queue[i]->tx_ring_size);
1439 
1440 	return 0;
1441 
1442 register_fail:
1443 	unmap_group_regs(priv);
1444 	gfar_free_rx_queues(priv);
1445 	gfar_free_tx_queues(priv);
1446 	of_node_put(priv->phy_node);
1447 	of_node_put(priv->tbi_node);
1448 	free_gfar_dev(priv);
1449 	return err;
1450 }
1451 
1452 static int gfar_remove(struct platform_device *ofdev)
1453 {
1454 	struct gfar_private *priv = platform_get_drvdata(ofdev);
1455 
1456 	of_node_put(priv->phy_node);
1457 	of_node_put(priv->tbi_node);
1458 
1459 	unregister_netdev(priv->ndev);
1460 	unmap_group_regs(priv);
1461 	gfar_free_rx_queues(priv);
1462 	gfar_free_tx_queues(priv);
1463 	free_gfar_dev(priv);
1464 
1465 	return 0;
1466 }
1467 
1468 #ifdef CONFIG_PM
1469 
1470 static int gfar_suspend(struct device *dev)
1471 {
1472 	struct gfar_private *priv = dev_get_drvdata(dev);
1473 	struct net_device *ndev = priv->ndev;
1474 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1475 	unsigned long flags;
1476 	u32 tempval;
1477 
1478 	int magic_packet = priv->wol_en &&
1479 			   (priv->device_flags &
1480 			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1481 
1482 	netif_device_detach(ndev);
1483 
1484 	if (netif_running(ndev)) {
1485 
1486 		local_irq_save(flags);
1487 		lock_tx_qs(priv);
1488 
1489 		gfar_halt_nodisable(priv);
1490 
1491 		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
1492 		tempval = gfar_read(&regs->maccfg1);
1493 
1494 		tempval &= ~MACCFG1_TX_EN;
1495 
1496 		if (!magic_packet)
1497 			tempval &= ~MACCFG1_RX_EN;
1498 
1499 		gfar_write(&regs->maccfg1, tempval);
1500 
1501 		unlock_tx_qs(priv);
1502 		local_irq_restore(flags);
1503 
1504 		disable_napi(priv);
1505 
1506 		if (magic_packet) {
1507 			/* Enable interrupt on Magic Packet */
1508 			gfar_write(&regs->imask, IMASK_MAG);
1509 
1510 			/* Enable Magic Packet mode */
1511 			tempval = gfar_read(&regs->maccfg2);
1512 			tempval |= MACCFG2_MPEN;
1513 			gfar_write(&regs->maccfg2, tempval);
1514 		} else {
1515 			phy_stop(priv->phydev);
1516 		}
1517 	}
1518 
1519 	return 0;
1520 }
1521 
1522 static int gfar_resume(struct device *dev)
1523 {
1524 	struct gfar_private *priv = dev_get_drvdata(dev);
1525 	struct net_device *ndev = priv->ndev;
1526 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1527 	unsigned long flags;
1528 	u32 tempval;
1529 	int magic_packet = priv->wol_en &&
1530 			   (priv->device_flags &
1531 			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1532 
1533 	if (!netif_running(ndev)) {
1534 		netif_device_attach(ndev);
1535 		return 0;
1536 	}
1537 
1538 	if (!magic_packet && priv->phydev)
1539 		phy_start(priv->phydev);
1540 
1541 	/* Disable Magic Packet mode, in case something
1542 	 * else woke us up.
1543 	 */
1544 	local_irq_save(flags);
1545 	lock_tx_qs(priv);
1546 
1547 	tempval = gfar_read(&regs->maccfg2);
1548 	tempval &= ~MACCFG2_MPEN;
1549 	gfar_write(&regs->maccfg2, tempval);
1550 
1551 	gfar_start(priv);
1552 
1553 	unlock_tx_qs(priv);
1554 	local_irq_restore(flags);
1555 
1556 	netif_device_attach(ndev);
1557 
1558 	enable_napi(priv);
1559 
1560 	return 0;
1561 }
1562 
1563 static int gfar_restore(struct device *dev)
1564 {
1565 	struct gfar_private *priv = dev_get_drvdata(dev);
1566 	struct net_device *ndev = priv->ndev;
1567 
1568 	if (!netif_running(ndev)) {
1569 		netif_device_attach(ndev);
1570 
1571 		return 0;
1572 	}
1573 
1574 	if (gfar_init_bds(ndev)) {
1575 		free_skb_resources(priv);
1576 		return -ENOMEM;
1577 	}
1578 
1579 	gfar_mac_reset(priv);
1580 
1581 	gfar_init_tx_rx_base(priv);
1582 
1583 	gfar_start(priv);
1584 
1585 	priv->oldlink = 0;
1586 	priv->oldspeed = 0;
1587 	priv->oldduplex = -1;
1588 
1589 	if (priv->phydev)
1590 		phy_start(priv->phydev);
1591 
1592 	netif_device_attach(ndev);
1593 	enable_napi(priv);
1594 
1595 	return 0;
1596 }
1597 
1598 static struct dev_pm_ops gfar_pm_ops = {
1599 	.suspend = gfar_suspend,
1600 	.resume = gfar_resume,
1601 	.freeze = gfar_suspend,
1602 	.thaw = gfar_resume,
1603 	.restore = gfar_restore,
1604 };
1605 
1606 #define GFAR_PM_OPS (&gfar_pm_ops)
1607 
1608 #else
1609 
1610 #define GFAR_PM_OPS NULL
1611 
1612 #endif
1613 
1614 /* Reads the controller's registers to determine what interface
1615  * connects it to the PHY.
1616  */
1617 static phy_interface_t gfar_get_interface(struct net_device *dev)
1618 {
1619 	struct gfar_private *priv = netdev_priv(dev);
1620 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1621 	u32 ecntrl;
1622 
1623 	ecntrl = gfar_read(&regs->ecntrl);
1624 
1625 	if (ecntrl & ECNTRL_SGMII_MODE)
1626 		return PHY_INTERFACE_MODE_SGMII;
1627 
1628 	if (ecntrl & ECNTRL_TBI_MODE) {
1629 		if (ecntrl & ECNTRL_REDUCED_MODE)
1630 			return PHY_INTERFACE_MODE_RTBI;
1631 		else
1632 			return PHY_INTERFACE_MODE_TBI;
1633 	}
1634 
1635 	if (ecntrl & ECNTRL_REDUCED_MODE) {
1636 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1637 			return PHY_INTERFACE_MODE_RMII;
1638 		}
1639 		else {
1640 			phy_interface_t interface = priv->interface;
1641 
1642 			/* This isn't autodetected right now, so it must
1643 			 * be set by the device tree or platform code.
1644 			 */
1645 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1646 				return PHY_INTERFACE_MODE_RGMII_ID;
1647 
1648 			return PHY_INTERFACE_MODE_RGMII;
1649 		}
1650 	}
1651 
1652 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1653 		return PHY_INTERFACE_MODE_GMII;
1654 
1655 	return PHY_INTERFACE_MODE_MII;
1656 }
1657 
1658 
1659 /* Initializes driver's PHY state, and attaches to the PHY.
1660  * Returns 0 on success.
1661  */
1662 static int init_phy(struct net_device *dev)
1663 {
1664 	struct gfar_private *priv = netdev_priv(dev);
1665 	uint gigabit_support =
1666 		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1667 		GFAR_SUPPORTED_GBIT : 0;
1668 	phy_interface_t interface;
1669 
1670 	priv->oldlink = 0;
1671 	priv->oldspeed = 0;
1672 	priv->oldduplex = -1;
1673 
1674 	interface = gfar_get_interface(dev);
1675 
1676 	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1677 				      interface);
1678 	if (!priv->phydev) {
1679 		dev_err(&dev->dev, "could not attach to PHY\n");
1680 		return -ENODEV;
1681 	}
1682 
1683 	if (interface == PHY_INTERFACE_MODE_SGMII)
1684 		gfar_configure_serdes(dev);
1685 
1686 	/* Remove any features not supported by the controller */
1687 	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1688 	priv->phydev->advertising = priv->phydev->supported;
1689 
1690 	return 0;
1691 }
1692 
1693 /* Initialize TBI PHY interface for communicating with the
1694  * SERDES lynx PHY on the chip.  We communicate with this PHY
1695  * through the MDIO bus on each controller, treating it as a
1696  * "normal" PHY at the address found in the TBIPA register.  We assume
1697  * that the TBIPA register is valid.  Either the MDIO bus code will set
1698  * it to a value that doesn't conflict with other PHYs on the bus, or the
1699  * value doesn't matter, as there are no other PHYs on the bus.
1700  */
1701 static void gfar_configure_serdes(struct net_device *dev)
1702 {
1703 	struct gfar_private *priv = netdev_priv(dev);
1704 	struct phy_device *tbiphy;
1705 
1706 	if (!priv->tbi_node) {
1707 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
1708 				    "device tree specify a tbi-handle\n");
1709 		return;
1710 	}
1711 
1712 	tbiphy = of_phy_find_device(priv->tbi_node);
1713 	if (!tbiphy) {
1714 		dev_err(&dev->dev, "error: Could not get TBI device\n");
1715 		return;
1716 	}
1717 
1718 	/* If the link is already up, we must already be ok, and don't need to
1719 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1720 	 * everything for us?  Resetting it takes the link down and requires
1721 	 * several seconds for it to come back.
1722 	 */
1723 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1724 		return;
1725 
1726 	/* Single clk mode, mii mode off (for serdes communication) */
1727 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1728 
1729 	phy_write(tbiphy, MII_ADVERTISE,
1730 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1731 		  ADVERTISE_1000XPSE_ASYM);
1732 
1733 	phy_write(tbiphy, MII_BMCR,
1734 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1735 		  BMCR_SPEED1000);
1736 }
1737 
1738 static int __gfar_is_rx_idle(struct gfar_private *priv)
1739 {
1740 	u32 res;
1741 
1742 	/* Normally TSEC should not hang on GRS commands, so we should
1743 	 * actually wait for the IEVENT_GRSC flag.
1744 	 */
1745 	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1746 		return 0;
1747 
1748 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1749 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1750 	 * and the Rx can be safely reset.
1751 	 */
1752 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1753 	res &= 0x7f807f80;
1754 	if ((res & 0xffff) == (res >> 16))
1755 		return 1;
1756 
1757 	return 0;
1758 }
1759 
1760 /* Halt the receive and transmit queues */
1761 static void gfar_halt_nodisable(struct gfar_private *priv)
1762 {
1763 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1764 	u32 tempval;
1765 	unsigned int timeout;
1766 	int stopped;
1767 
1768 	gfar_ints_disable(priv);
1769 
1770 	if (gfar_is_dma_stopped(priv))
1771 		return;
1772 
1773 	/* Stop the DMA, and wait for it to stop */
1774 	tempval = gfar_read(&regs->dmactrl);
1775 	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1776 	gfar_write(&regs->dmactrl, tempval);
1777 
1778 retry:
1779 	timeout = 1000;
1780 	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1781 		cpu_relax();
1782 		timeout--;
1783 	}
1784 
1785 	if (!timeout)
1786 		stopped = gfar_is_dma_stopped(priv);
1787 
1788 	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1789 	    !__gfar_is_rx_idle(priv))
1790 		goto retry;
1791 }
1792 
1793 /* Halt the receive and transmit queues */
1794 void gfar_halt(struct gfar_private *priv)
1795 {
1796 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1797 	u32 tempval;
1798 
1799 	/* Disable the Rx/Tx hw queues */
1800 	gfar_write(&regs->rqueue, 0);
1801 	gfar_write(&regs->tqueue, 0);
1802 
1803 	mdelay(10);
1804 
1805 	gfar_halt_nodisable(priv);
1806 
1807 	/* Disable Rx/Tx DMA */
1808 	tempval = gfar_read(&regs->maccfg1);
1809 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1810 	gfar_write(&regs->maccfg1, tempval);
1811 }
1812 
1813 void stop_gfar(struct net_device *dev)
1814 {
1815 	struct gfar_private *priv = netdev_priv(dev);
1816 
1817 	netif_tx_stop_all_queues(dev);
1818 
1819 	smp_mb__before_atomic();
1820 	set_bit(GFAR_DOWN, &priv->state);
1821 	smp_mb__after_atomic();
1822 
1823 	disable_napi(priv);
1824 
1825 	/* disable ints and gracefully shut down Rx/Tx DMA */
1826 	gfar_halt(priv);
1827 
1828 	phy_stop(priv->phydev);
1829 
1830 	free_skb_resources(priv);
1831 }
1832 
1833 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1834 {
1835 	struct txbd8 *txbdp;
1836 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1837 	int i, j;
1838 
1839 	txbdp = tx_queue->tx_bd_base;
1840 
1841 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1842 		if (!tx_queue->tx_skbuff[i])
1843 			continue;
1844 
1845 		dma_unmap_single(priv->dev, txbdp->bufPtr,
1846 				 txbdp->length, DMA_TO_DEVICE);
1847 		txbdp->lstatus = 0;
1848 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1849 		     j++) {
1850 			txbdp++;
1851 			dma_unmap_page(priv->dev, txbdp->bufPtr,
1852 				       txbdp->length, DMA_TO_DEVICE);
1853 		}
1854 		txbdp++;
1855 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1856 		tx_queue->tx_skbuff[i] = NULL;
1857 	}
1858 	kfree(tx_queue->tx_skbuff);
1859 	tx_queue->tx_skbuff = NULL;
1860 }
1861 
1862 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1863 {
1864 	struct rxbd8 *rxbdp;
1865 	struct gfar_private *priv = netdev_priv(rx_queue->dev);
1866 	int i;
1867 
1868 	rxbdp = rx_queue->rx_bd_base;
1869 
1870 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
1871 		if (rx_queue->rx_skbuff[i]) {
1872 			dma_unmap_single(priv->dev, rxbdp->bufPtr,
1873 					 priv->rx_buffer_size,
1874 					 DMA_FROM_DEVICE);
1875 			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1876 			rx_queue->rx_skbuff[i] = NULL;
1877 		}
1878 		rxbdp->lstatus = 0;
1879 		rxbdp->bufPtr = 0;
1880 		rxbdp++;
1881 	}
1882 	kfree(rx_queue->rx_skbuff);
1883 	rx_queue->rx_skbuff = NULL;
1884 }
1885 
1886 /* If there are any tx skbs or rx skbs still around, free them.
1887  * Then free tx_skbuff and rx_skbuff
1888  */
1889 static void free_skb_resources(struct gfar_private *priv)
1890 {
1891 	struct gfar_priv_tx_q *tx_queue = NULL;
1892 	struct gfar_priv_rx_q *rx_queue = NULL;
1893 	int i;
1894 
1895 	/* Go through all the buffer descriptors and free their data buffers */
1896 	for (i = 0; i < priv->num_tx_queues; i++) {
1897 		struct netdev_queue *txq;
1898 
1899 		tx_queue = priv->tx_queue[i];
1900 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1901 		if (tx_queue->tx_skbuff)
1902 			free_skb_tx_queue(tx_queue);
1903 		netdev_tx_reset_queue(txq);
1904 	}
1905 
1906 	for (i = 0; i < priv->num_rx_queues; i++) {
1907 		rx_queue = priv->rx_queue[i];
1908 		if (rx_queue->rx_skbuff)
1909 			free_skb_rx_queue(rx_queue);
1910 	}
1911 
1912 	dma_free_coherent(priv->dev,
1913 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1914 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1915 			  priv->tx_queue[0]->tx_bd_base,
1916 			  priv->tx_queue[0]->tx_bd_dma_base);
1917 }
1918 
1919 void gfar_start(struct gfar_private *priv)
1920 {
1921 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1922 	u32 tempval;
1923 	int i = 0;
1924 
1925 	/* Enable Rx/Tx hw queues */
1926 	gfar_write(&regs->rqueue, priv->rqueue);
1927 	gfar_write(&regs->tqueue, priv->tqueue);
1928 
1929 	/* Initialize DMACTRL to have WWR and WOP */
1930 	tempval = gfar_read(&regs->dmactrl);
1931 	tempval |= DMACTRL_INIT_SETTINGS;
1932 	gfar_write(&regs->dmactrl, tempval);
1933 
1934 	/* Make sure we aren't stopped */
1935 	tempval = gfar_read(&regs->dmactrl);
1936 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1937 	gfar_write(&regs->dmactrl, tempval);
1938 
1939 	for (i = 0; i < priv->num_grps; i++) {
1940 		regs = priv->gfargrp[i].regs;
1941 		/* Clear THLT/RHLT, so that the DMA starts polling now */
1942 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1943 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1944 	}
1945 
1946 	/* Enable Rx/Tx DMA */
1947 	tempval = gfar_read(&regs->maccfg1);
1948 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1949 	gfar_write(&regs->maccfg1, tempval);
1950 
1951 	gfar_ints_enable(priv);
1952 
1953 	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1954 }
1955 
1956 static void free_grp_irqs(struct gfar_priv_grp *grp)
1957 {
1958 	free_irq(gfar_irq(grp, TX)->irq, grp);
1959 	free_irq(gfar_irq(grp, RX)->irq, grp);
1960 	free_irq(gfar_irq(grp, ER)->irq, grp);
1961 }
1962 
1963 static int register_grp_irqs(struct gfar_priv_grp *grp)
1964 {
1965 	struct gfar_private *priv = grp->priv;
1966 	struct net_device *dev = priv->ndev;
1967 	int err;
1968 
1969 	/* If the device has multiple interrupts, register for
1970 	 * them.  Otherwise, only register for the one
1971 	 */
1972 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1973 		/* Install our interrupt handlers for Error,
1974 		 * Transmit, and Receive
1975 		 */
1976 		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1977 				  gfar_irq(grp, ER)->name, grp);
1978 		if (err < 0) {
1979 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1980 				  gfar_irq(grp, ER)->irq);
1981 
1982 			goto err_irq_fail;
1983 		}
1984 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1985 				  gfar_irq(grp, TX)->name, grp);
1986 		if (err < 0) {
1987 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1988 				  gfar_irq(grp, TX)->irq);
1989 			goto tx_irq_fail;
1990 		}
1991 		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
1992 				  gfar_irq(grp, RX)->name, grp);
1993 		if (err < 0) {
1994 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1995 				  gfar_irq(grp, RX)->irq);
1996 			goto rx_irq_fail;
1997 		}
1998 	} else {
1999 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2000 				  gfar_irq(grp, TX)->name, grp);
2001 		if (err < 0) {
2002 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2003 				  gfar_irq(grp, TX)->irq);
2004 			goto err_irq_fail;
2005 		}
2006 	}
2007 
2008 	return 0;
2009 
2010 rx_irq_fail:
2011 	free_irq(gfar_irq(grp, TX)->irq, grp);
2012 tx_irq_fail:
2013 	free_irq(gfar_irq(grp, ER)->irq, grp);
2014 err_irq_fail:
2015 	return err;
2016 
2017 }
2018 
2019 static void gfar_free_irq(struct gfar_private *priv)
2020 {
2021 	int i;
2022 
2023 	/* Free the IRQs */
2024 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2025 		for (i = 0; i < priv->num_grps; i++)
2026 			free_grp_irqs(&priv->gfargrp[i]);
2027 	} else {
2028 		for (i = 0; i < priv->num_grps; i++)
2029 			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2030 				 &priv->gfargrp[i]);
2031 	}
2032 }
2033 
2034 static int gfar_request_irq(struct gfar_private *priv)
2035 {
2036 	int err, i, j;
2037 
2038 	for (i = 0; i < priv->num_grps; i++) {
2039 		err = register_grp_irqs(&priv->gfargrp[i]);
2040 		if (err) {
2041 			for (j = 0; j < i; j++)
2042 				free_grp_irqs(&priv->gfargrp[j]);
2043 			return err;
2044 		}
2045 	}
2046 
2047 	return 0;
2048 }
2049 
2050 /* Bring the controller up and running */
2051 int startup_gfar(struct net_device *ndev)
2052 {
2053 	struct gfar_private *priv = netdev_priv(ndev);
2054 	int err;
2055 
2056 	gfar_mac_reset(priv);
2057 
2058 	err = gfar_alloc_skb_resources(ndev);
2059 	if (err)
2060 		return err;
2061 
2062 	gfar_init_tx_rx_base(priv);
2063 
2064 	smp_mb__before_atomic();
2065 	clear_bit(GFAR_DOWN, &priv->state);
2066 	smp_mb__after_atomic();
2067 
2068 	/* Start Rx/Tx DMA and enable the interrupts */
2069 	gfar_start(priv);
2070 
2071 	phy_start(priv->phydev);
2072 
2073 	enable_napi(priv);
2074 
2075 	netif_tx_wake_all_queues(ndev);
2076 
2077 	return 0;
2078 }
2079 
2080 /* Called when something needs to use the ethernet device
2081  * Returns 0 for success.
2082  */
2083 static int gfar_enet_open(struct net_device *dev)
2084 {
2085 	struct gfar_private *priv = netdev_priv(dev);
2086 	int err;
2087 
2088 	err = init_phy(dev);
2089 	if (err)
2090 		return err;
2091 
2092 	err = gfar_request_irq(priv);
2093 	if (err)
2094 		return err;
2095 
2096 	err = startup_gfar(dev);
2097 	if (err)
2098 		return err;
2099 
2100 	device_set_wakeup_enable(&dev->dev, priv->wol_en);
2101 
2102 	return err;
2103 }
2104 
2105 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2106 {
2107 	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2108 
2109 	memset(fcb, 0, GMAC_FCB_LEN);
2110 
2111 	return fcb;
2112 }
2113 
2114 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2115 				    int fcb_length)
2116 {
2117 	/* If we're here, it's an IP packet with a TCP or UDP
2118 	 * payload.  We set it to checksum, using a pseudo-header
2119 	 * we provide
2120 	 */
2121 	u8 flags = TXFCB_DEFAULT;
2122 
2123 	/* Tell the controller what the protocol is,
2124 	 * and provide the already-calculated pseudo-header checksum (phcs)
2125 	 */
2126 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2127 		flags |= TXFCB_UDP;
2128 		fcb->phcs = udp_hdr(skb)->check;
2129 	} else
2130 		fcb->phcs = tcp_hdr(skb)->check;
2131 
2132 	/* l3os is the distance between the start of the
2133 	 * frame (skb->data) and the start of the IP hdr.
2134 	 * l4os is the distance between the start of the
2135 	 * l3 hdr and the l4 hdr
2136 	 */
2137 	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2138 	fcb->l4os = skb_network_header_len(skb);
2139 
2140 	fcb->flags = flags;
2141 }
2142 
2143 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2144 {
2145 	fcb->flags |= TXFCB_VLN;
2146 	fcb->vlctl = vlan_tx_tag_get(skb);
2147 }
2148 
2149 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2150 				      struct txbd8 *base, int ring_size)
2151 {
2152 	struct txbd8 *new_bd = bdp + stride;
2153 
2154 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2155 }
2156 
2157 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2158 				      int ring_size)
2159 {
2160 	return skip_txbd(bdp, 1, base, ring_size);
2161 }
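/* Example of the wrap-around behavior: with a ring of 256 descriptors,
 * skip_txbd(base + 250, 10, base, 256) returns base + 4.
 */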
2162 
2163 /* eTSEC12: csum generation not supported for some fcb offsets */
2164 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2165 				       unsigned long fcb_addr)
2166 {
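	/* i.e. flag frames whose FCB would start within the last seven
	 * bytes (offsets 0x19-0x1f) of a 32-byte block
	 */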
2167 	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2168 	       (fcb_addr % 0x20) > 0x18);
2169 }
2170 
2171 /* eTSEC76: csum generation for frames larger than 2500 may
2172  * cause excess delays before start of transmission
2173  */
2174 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2175 				       unsigned int len)
2176 {
2177 	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2178 	       (len > 2500));
2179 }
2180 
2181 /* This is called by the kernel when a frame is ready for transmission.
2182  * It is pointed to by the dev->hard_start_xmit function pointer
2183  */
2184 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2185 {
2186 	struct gfar_private *priv = netdev_priv(dev);
2187 	struct gfar_priv_tx_q *tx_queue = NULL;
2188 	struct netdev_queue *txq;
2189 	struct gfar __iomem *regs = NULL;
2190 	struct txfcb *fcb = NULL;
2191 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2192 	u32 lstatus;
2193 	int i, rq = 0;
2194 	int do_tstamp, do_csum, do_vlan;
2195 	u32 bufaddr;
2196 	unsigned long flags;
2197 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2198 
2199 	rq = skb->queue_mapping;
2200 	tx_queue = priv->tx_queue[rq];
2201 	txq = netdev_get_tx_queue(dev, rq);
2202 	base = tx_queue->tx_bd_base;
2203 	regs = tx_queue->grp->regs;
2204 
2205 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2206 	do_vlan = vlan_tx_tag_present(skb);
2207 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2208 		    priv->hwts_tx_en;
2209 
2210 	if (do_csum || do_vlan)
2211 		fcb_len = GMAC_FCB_LEN;
2212 
2213 	/* check if time stamp should be generated */
2214 	if (unlikely(do_tstamp))
2215 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2216 
2217 	/* make space for additional header when fcb is needed */
2218 	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2219 		struct sk_buff *skb_new;
2220 
2221 		skb_new = skb_realloc_headroom(skb, fcb_len);
2222 		if (!skb_new) {
2223 			dev->stats.tx_errors++;
2224 			dev_kfree_skb_any(skb);
2225 			return NETDEV_TX_OK;
2226 		}
2227 
2228 		if (skb->sk)
2229 			skb_set_owner_w(skb_new, skb->sk);
2230 		dev_consume_skb_any(skb);
2231 		skb = skb_new;
2232 	}
2233 
2234 	/* total number of fragments in the SKB */
2235 	nr_frags = skb_shinfo(skb)->nr_frags;
2236 
2237 	/* calculate the required number of TxBDs for this skb */
2238 	if (unlikely(do_tstamp))
2239 		nr_txbds = nr_frags + 2;
2240 	else
2241 		nr_txbds = nr_frags + 1;
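	/* i.e. one BD for the linear part plus one per fragment, with an
	 * extra BD when a hardware timestamp is inserted (see below)
	 */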
2242 
2243 	/* check if there is space to queue this packet */
2244 	if (nr_txbds > tx_queue->num_txbdfree) {
2245 		/* no space, stop the queue */
2246 		netif_tx_stop_queue(txq);
2247 		dev->stats.tx_fifo_errors++;
2248 		return NETDEV_TX_BUSY;
2249 	}
2250 
2251 	/* Update transmit stats */
2252 	bytes_sent = skb->len;
2253 	tx_queue->stats.tx_bytes += bytes_sent;
2254 	/* keep Tx bytes on wire for BQL accounting */
2255 	GFAR_CB(skb)->bytes_sent = bytes_sent;
2256 	tx_queue->stats.tx_packets++;
2257 
2258 	txbdp = txbdp_start = tx_queue->cur_tx;
2259 	lstatus = txbdp->lstatus;
2260 
2261 	/* Time stamp insertion requires one additional TxBD */
2262 	if (unlikely(do_tstamp))
2263 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2264 						 tx_queue->tx_ring_size);
2265 
2266 	if (nr_frags == 0) {
2267 		if (unlikely(do_tstamp))
2268 			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2269 							  TXBD_INTERRUPT);
2270 		else
2271 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2272 	} else {
2273 		/* Place the fragment addresses and lengths into the TxBDs */
2274 		for (i = 0; i < nr_frags; i++) {
2275 			unsigned int frag_len;
2276 			/* Point at the next BD, wrapping as needed */
2277 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2278 
2279 			frag_len = skb_shinfo(skb)->frags[i].size;
2280 
2281 			lstatus = txbdp->lstatus | frag_len |
2282 				  BD_LFLAG(TXBD_READY);
2283 
2284 			/* Handle the last BD specially */
2285 			if (i == nr_frags - 1)
2286 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2287 
2288 			bufaddr = skb_frag_dma_map(priv->dev,
2289 						   &skb_shinfo(skb)->frags[i],
2290 						   0,
2291 						   frag_len,
2292 						   DMA_TO_DEVICE);
2293 
2294 			/* set the TxBD length and buffer pointer */
2295 			txbdp->bufPtr = bufaddr;
2296 			txbdp->lstatus = lstatus;
2297 		}
2298 
2299 		lstatus = txbdp_start->lstatus;
2300 	}
2301 
2302 	/* Add TxPAL between FCB and frame if required */
2303 	if (unlikely(do_tstamp)) {
2304 		skb_push(skb, GMAC_TXPAL_LEN);
2305 		memset(skb->data, 0, GMAC_TXPAL_LEN);
2306 	}
2307 
2308 	/* Add TxFCB if required */
2309 	if (fcb_len) {
2310 		fcb = gfar_add_fcb(skb);
2311 		lstatus |= BD_LFLAG(TXBD_TOE);
2312 	}
2313 
2314 	/* Set up checksumming */
2315 	if (do_csum) {
2316 		gfar_tx_checksum(skb, fcb, fcb_len);
2317 
2318 		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2319 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
2320 			__skb_pull(skb, GMAC_FCB_LEN);
2321 			skb_checksum_help(skb);
2322 			if (do_vlan || do_tstamp) {
2323 				/* put back a new fcb for vlan/tstamp TOE */
2324 				fcb = gfar_add_fcb(skb);
2325 			} else {
2326 				/* Tx TOE not used */
2327 				lstatus &= ~(BD_LFLAG(TXBD_TOE));
2328 				fcb = NULL;
2329 			}
2330 		}
2331 	}
2332 
2333 	if (do_vlan)
2334 		gfar_tx_vlan(skb, fcb);
2335 
2336 	/* Setup tx hardware time stamping if requested */
2337 	if (unlikely(do_tstamp)) {
2338 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2339 		fcb->ptp = 1;
2340 	}
2341 
2342 	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
2343 					     skb_headlen(skb), DMA_TO_DEVICE);
2344 
2345 	/* If time stamping is requested, one additional TxBD must be set up. The
2346 	 * first TxBD points to the FCB and must have a data length of
2347 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2348 	 * the full frame length.
2349 	 */
2350 	if (unlikely(do_tstamp)) {
2351 		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
2352 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2353 					 (skb_headlen(skb) - fcb_len);
2354 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2355 	} else {
2356 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2357 	}
2358 
2359 	netdev_tx_sent_queue(txq, bytes_sent);
2360 
2361 	/* We can work in parallel with gfar_clean_tx_ring(), except
2362 	 * when modifying num_txbdfree. Note that we didn't grab the lock
2363 	 * when we read num_txbdfree and checked for available space;
2364 	 * outside of this function it can only grow, and once we have the
2365 	 * needed space, it cannot suddenly disappear.
2366 	 *
2367 	 * The lock also protects us from gfar_error(), which can modify
2368 	 * regs->tstat and thus retrigger the transfers, which is why we
2369 	 * also must grab the lock before setting the READY bit of the
2370 	 * first BD to be transmitted.
2371 	 */
2372 	spin_lock_irqsave(&tx_queue->txlock, flags);
2373 
2374 	/* The powerpc-specific eieio() is used, as wmb() has too strong
2375 	 * semantics (it requires synchronization between cacheable and
2376 	 * uncacheable mappings, which eieio doesn't provide and which we
2377 	 * don't need), thus requiring a more expensive sync instruction.  At
2378 	 * some point, the set of architecture-independent barrier functions
2379 	 * should be expanded to include weaker barriers.
2380 	 */
2381 	eieio();
2382 
2383 	txbdp_start->lstatus = lstatus;
2384 
2385 	eieio(); /* force lstatus write before tx_skbuff */
2386 
2387 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2388 
2389 	/* Update the current skb pointer to the next entry we will use
2390 	 * (wrapping if necessary)
2391 	 */
2392 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2393 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2394 
2395 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2396 
2397 	/* reduce TxBD free count */
2398 	tx_queue->num_txbdfree -= (nr_txbds);
2399 
2400 	/* If the next BD still needs to be cleaned up, then the bds
2401 	 * are full.  We need to tell the kernel to stop sending us stuff.
2402 	 */
2403 	if (!tx_queue->num_txbdfree) {
2404 		netif_tx_stop_queue(txq);
2405 
2406 		dev->stats.tx_fifo_errors++;
2407 	}
2408 
2409 	/* Tell the DMA to go go go */
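	/* Each Tx queue has its own halt (THLT) bit in TSTAT; shifting the
	 * queue 0 bit right by qindex selects the halt bit for this queue.
	 */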
2410 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2411 
2412 	/* Unlock priv */
2413 	spin_unlock_irqrestore(&tx_queue->txlock, flags);
2414 
2415 	return NETDEV_TX_OK;
2416 }
2417 
2418 /* Stops the kernel queue, and halts the controller */
2419 static int gfar_close(struct net_device *dev)
2420 {
2421 	struct gfar_private *priv = netdev_priv(dev);
2422 
2423 	cancel_work_sync(&priv->reset_task);
2424 	stop_gfar(dev);
2425 
2426 	/* Disconnect from the PHY */
2427 	phy_disconnect(priv->phydev);
2428 	priv->phydev = NULL;
2429 
2430 	gfar_free_irq(priv);
2431 
2432 	return 0;
2433 }
2434 
2435 /* Changes the mac address if the controller is not running. */
2436 static int gfar_set_mac_address(struct net_device *dev)
2437 {
2438 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2439 
2440 	return 0;
2441 }
2442 
2443 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2444 {
2445 	struct gfar_private *priv = netdev_priv(dev);
2446 	int frame_size = new_mtu + ETH_HLEN;
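	/* frame_size must cover at least a minimum-size (64 byte) Ethernet
	 * frame and must not exceed JUMBO_FRAME_SIZE
	 */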
2447 
2448 	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2449 		netif_err(priv, drv, dev, "Invalid MTU setting\n");
2450 		return -EINVAL;
2451 	}
2452 
2453 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2454 		cpu_relax();
2455 
2456 	if (dev->flags & IFF_UP)
2457 		stop_gfar(dev);
2458 
2459 	dev->mtu = new_mtu;
2460 
2461 	if (dev->flags & IFF_UP)
2462 		startup_gfar(dev);
2463 
2464 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2465 
2466 	return 0;
2467 }
2468 
2469 void reset_gfar(struct net_device *ndev)
2470 {
2471 	struct gfar_private *priv = netdev_priv(ndev);
2472 
2473 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2474 		cpu_relax();
2475 
2476 	stop_gfar(ndev);
2477 	startup_gfar(ndev);
2478 
2479 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2480 }
2481 
2482 /* gfar_reset_task gets scheduled when a packet has not been
2483  * transmitted after a set amount of time.
2484  * For now, assume that clearing out all the structures, and
2485  * starting over will fix the problem.
2486  */
2487 static void gfar_reset_task(struct work_struct *work)
2488 {
2489 	struct gfar_private *priv = container_of(work, struct gfar_private,
2490 						 reset_task);
2491 	reset_gfar(priv->ndev);
2492 }
2493 
2494 static void gfar_timeout(struct net_device *dev)
2495 {
2496 	struct gfar_private *priv = netdev_priv(dev);
2497 
2498 	dev->stats.tx_errors++;
2499 	schedule_work(&priv->reset_task);
2500 }
2501 
2502 static void gfar_align_skb(struct sk_buff *skb)
2503 {
2504 	/* Reserve as many bytes as needed to align the data buffer on an
2505 	 * RXBUF_ALIGNMENT boundary
2506 	 */
2507 	skb_reserve(skb, RXBUF_ALIGNMENT -
2508 		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2509 }
2510 
2511 /* Interrupt Handler for Transmit complete */
2512 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2513 {
2514 	struct net_device *dev = tx_queue->dev;
2515 	struct netdev_queue *txq;
2516 	struct gfar_private *priv = netdev_priv(dev);
2517 	struct txbd8 *bdp, *next = NULL;
2518 	struct txbd8 *lbdp = NULL;
2519 	struct txbd8 *base = tx_queue->tx_bd_base;
2520 	struct sk_buff *skb;
2521 	int skb_dirtytx;
2522 	int tx_ring_size = tx_queue->tx_ring_size;
2523 	int frags = 0, nr_txbds = 0;
2524 	int i;
2525 	int howmany = 0;
2526 	int tqi = tx_queue->qindex;
2527 	unsigned int bytes_sent = 0;
2528 	u32 lstatus;
2529 	size_t buflen;
2530 
2531 	txq = netdev_get_tx_queue(dev, tqi);
2532 	bdp = tx_queue->dirty_tx;
2533 	skb_dirtytx = tx_queue->skb_dirtytx;
2534 
2535 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2536 		unsigned long flags;
2537 
2538 		frags = skb_shinfo(skb)->nr_frags;
2539 
2540 		/* When time stamping, one additional TxBD must be freed.
2541 		 * Also, we need to dma_unmap_single() the TxPAL.
2542 		 */
2543 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2544 			nr_txbds = frags + 2;
2545 		else
2546 			nr_txbds = frags + 1;
2547 
2548 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2549 
2550 		lstatus = lbdp->lstatus;
2551 
2552 		/* Only clean completed frames */
2553 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2554 		    (lstatus & BD_LENGTH_MASK))
2555 			break;
2556 
2557 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2558 			next = next_txbd(bdp, base, tx_ring_size);
2559 			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2560 		} else
2561 			buflen = bdp->length;
2562 
2563 		dma_unmap_single(priv->dev, bdp->bufPtr,
2564 				 buflen, DMA_TO_DEVICE);
2565 
2566 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2567 			struct skb_shared_hwtstamps shhwtstamps;
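			/* pick up the nanosecond timestamp the controller
			 * wrote into the FCB/TxPAL area at the front of the
			 * buffer (8-byte aligned)
			 */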
2568 			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
2569 
2570 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2571 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2572 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2573 			skb_tstamp_tx(skb, &shhwtstamps);
2574 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2575 			bdp = next;
2576 		}
2577 
2578 		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2579 		bdp = next_txbd(bdp, base, tx_ring_size);
2580 
2581 		for (i = 0; i < frags; i++) {
2582 			dma_unmap_page(priv->dev, bdp->bufPtr,
2583 				       bdp->length, DMA_TO_DEVICE);
2584 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2585 			bdp = next_txbd(bdp, base, tx_ring_size);
2586 		}
2587 
2588 		bytes_sent += GFAR_CB(skb)->bytes_sent;
2589 
2590 		dev_kfree_skb_any(skb);
2591 
2592 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2593 
2594 		skb_dirtytx = (skb_dirtytx + 1) &
2595 			      TX_RING_MOD_MASK(tx_ring_size);
2596 
2597 		howmany++;
2598 		spin_lock_irqsave(&tx_queue->txlock, flags);
2599 		tx_queue->num_txbdfree += nr_txbds;
2600 		spin_unlock_irqrestore(&tx_queue->txlock, flags);
2601 	}
2602 
2603 	/* If we freed a buffer, we can restart transmission, if necessary */
2604 	if (tx_queue->num_txbdfree &&
2605 	    netif_tx_queue_stopped(txq) &&
2606 	    !(test_bit(GFAR_DOWN, &priv->state)))
2607 		netif_wake_subqueue(priv->ndev, tqi);
2608 
2609 	/* Update dirty indicators */
2610 	tx_queue->skb_dirtytx = skb_dirtytx;
2611 	tx_queue->dirty_tx = bdp;
2612 
2613 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2614 }
2615 
2616 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2617 			   struct sk_buff *skb)
2618 {
2619 	struct net_device *dev = rx_queue->dev;
2620 	struct gfar_private *priv = netdev_priv(dev);
2621 	dma_addr_t buf;
2622 
2623 	buf = dma_map_single(priv->dev, skb->data,
2624 			     priv->rx_buffer_size, DMA_FROM_DEVICE);
2625 	gfar_init_rxbdp(rx_queue, bdp, buf);
2626 }
2627 
2628 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2629 {
2630 	struct gfar_private *priv = netdev_priv(dev);
2631 	struct sk_buff *skb;
2632 
2633 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2634 	if (!skb)
2635 		return NULL;
2636 
2637 	gfar_align_skb(skb);
2638 
2639 	return skb;
2640 }
2641 
2642 struct sk_buff *gfar_new_skb(struct net_device *dev)
2643 {
2644 	return gfar_alloc_skb(dev);
2645 }
2646 
2647 static inline void count_errors(unsigned short status, struct net_device *dev)
2648 {
2649 	struct gfar_private *priv = netdev_priv(dev);
2650 	struct net_device_stats *stats = &dev->stats;
2651 	struct gfar_extra_stats *estats = &priv->extra_stats;
2652 
2653 	/* If the packet was truncated, none of the other errors matter */
2654 	if (status & RXBD_TRUNCATED) {
2655 		stats->rx_length_errors++;
2656 
2657 		atomic64_inc(&estats->rx_trunc);
2658 
2659 		return;
2660 	}
2661 	/* Count the errors, if there were any */
2662 	if (status & (RXBD_LARGE | RXBD_SHORT)) {
2663 		stats->rx_length_errors++;
2664 
2665 		if (status & RXBD_LARGE)
2666 			atomic64_inc(&estats->rx_large);
2667 		else
2668 			atomic64_inc(&estats->rx_short);
2669 	}
2670 	if (status & RXBD_NONOCTET) {
2671 		stats->rx_frame_errors++;
2672 		atomic64_inc(&estats->rx_nonoctet);
2673 	}
2674 	if (status & RXBD_CRCERR) {
2675 		atomic64_inc(&estats->rx_crcerr);
2676 		stats->rx_crc_errors++;
2677 	}
2678 	if (status & RXBD_OVERRUN) {
2679 		atomic64_inc(&estats->rx_overrun);
2680 		stats->rx_crc_errors++;
2681 	}
2682 }
2683 
2684 irqreturn_t gfar_receive(int irq, void *grp_id)
2685 {
2686 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2687 	unsigned long flags;
2688 	u32 imask;
2689 
2690 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2691 		spin_lock_irqsave(&grp->grplock, flags);
2692 		imask = gfar_read(&grp->regs->imask);
2693 		imask &= IMASK_RX_DISABLED;
2694 		gfar_write(&grp->regs->imask, imask);
2695 		spin_unlock_irqrestore(&grp->grplock, flags);
2696 		__napi_schedule(&grp->napi_rx);
2697 	} else {
2698 		/* Clear IEVENT, so interrupts aren't called again
2699 		 * because of the packets that have already arrived.
2700 		 */
2701 		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2702 	}
2703 
2704 	return IRQ_HANDLED;
2705 }
2706 
2707 /* Interrupt Handler for Transmit complete */
2708 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2709 {
2710 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2711 	unsigned long flags;
2712 	u32 imask;
2713 
2714 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2715 		spin_lock_irqsave(&grp->grplock, flags);
2716 		imask = gfar_read(&grp->regs->imask);
2717 		imask &= IMASK_TX_DISABLED;
2718 		gfar_write(&grp->regs->imask, imask);
2719 		spin_unlock_irqrestore(&grp->grplock, flags);
2720 		__napi_schedule(&grp->napi_tx);
2721 	} else {
2722 		/* Clear IEVENT, so interrupts aren't called again
2723 		 * because of frames that have already been transmitted.
2724 		 */
2725 		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2726 	}
2727 
2728 	return IRQ_HANDLED;
2729 }
2730 
2731 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2732 {
2733 	/* If valid headers were found, and valid sums
2734 	 * were verified, then we tell the kernel that no
2735 	 * checksumming is necessary.  Otherwise, let the stack verify it.
2736 	 */
2737 	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2738 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2739 	else
2740 		skb_checksum_none_assert(skb);
2741 }
2742 
2743 
2744 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2745 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2746 			       int amount_pull, struct napi_struct *napi)
2747 {
2748 	struct gfar_private *priv = netdev_priv(dev);
2749 	struct rxfcb *fcb = NULL;
2750 
2751 	/* fcb is at the beginning if it exists */
2752 	fcb = (struct rxfcb *)skb->data;
2753 
2754 	/* Remove the FCB from the skb
2755 	 * Remove the padded bytes, if there are any
2756 	 */
2757 	if (amount_pull) {
2758 		skb_record_rx_queue(skb, fcb->rq);
2759 		skb_pull(skb, amount_pull);
2760 	}
2761 
2762 	/* Get receive timestamp from the skb */
2763 	if (priv->hwts_rx_en) {
2764 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2765 		u64 *ns = (u64 *) skb->data;
2766 
2767 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2768 		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2769 	}
2770 
2771 	if (priv->padding)
2772 		skb_pull(skb, priv->padding);
2773 
2774 	if (dev->features & NETIF_F_RXCSUM)
2775 		gfar_rx_checksum(skb, fcb);
2776 
2777 	/* Tell the skb what kind of packet this is */
2778 	skb->protocol = eth_type_trans(skb, dev);
2779 
2780 	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2781 	 * Even if vlan rx accel is disabled, on some chips
2782 	 * RXFCB_VLN is pseudo randomly set.
2783 	 */
2784 	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2785 	    fcb->flags & RXFCB_VLN)
2786 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
2787 
2788 	/* Send the packet up the stack */
2789 	napi_gro_receive(napi, skb);
2790 
2791 }
2792 
2793 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2794  * until the budget/quota has been reached. Returns the number
2795  * of frames handled
2796  */
2797 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2798 {
2799 	struct net_device *dev = rx_queue->dev;
2800 	struct rxbd8 *bdp, *base;
2801 	struct sk_buff *skb;
2802 	int pkt_len;
2803 	int amount_pull;
2804 	int howmany = 0;
2805 	struct gfar_private *priv = netdev_priv(dev);
2806 
2807 	/* Get the first full descriptor */
2808 	bdp = rx_queue->cur_rx;
2809 	base = rx_queue->rx_bd_base;
2810 
2811 	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
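	/* When the controller prepends an FCB to received frames, those
	 * GMAC_FCB_LEN bytes are stripped in gfar_process_frame().
	 */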
2812 
2813 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2814 		struct sk_buff *newskb;
2815 
2816 		rmb();
2817 
2818 		/* Add another skb for the future */
2819 		newskb = gfar_new_skb(dev);
2820 
2821 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2822 
2823 		dma_unmap_single(priv->dev, bdp->bufPtr,
2824 				 priv->rx_buffer_size, DMA_FROM_DEVICE);
2825 
2826 		if (unlikely(!(bdp->status & RXBD_ERR) &&
2827 			     bdp->length > priv->rx_buffer_size))
2828 			bdp->status = RXBD_LARGE;
2829 
2830 		/* We drop the frame if we failed to allocate a new buffer */
2831 		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2832 			     bdp->status & RXBD_ERR)) {
2833 			count_errors(bdp->status, dev);
2834 
2835 			if (unlikely(!newskb))
2836 				newskb = skb;
2837 			else if (skb)
2838 				dev_kfree_skb(skb);
2839 		} else {
2840 			/* Increment the number of packets */
2841 			rx_queue->stats.rx_packets++;
2842 			howmany++;
2843 
2844 			if (likely(skb)) {
2845 				pkt_len = bdp->length - ETH_FCS_LEN;
2846 				/* Remove the FCS from the packet length */
2847 				skb_put(skb, pkt_len);
2848 				rx_queue->stats.rx_bytes += pkt_len;
2849 				skb_record_rx_queue(skb, rx_queue->qindex);
2850 				gfar_process_frame(dev, skb, amount_pull,
2851 						   &rx_queue->grp->napi_rx);
2852 
2853 			} else {
2854 				netif_warn(priv, rx_err, dev, "Missing skb!\n");
2855 				rx_queue->stats.rx_dropped++;
2856 				atomic64_inc(&priv->extra_stats.rx_skbmissing);
2857 			}
2858 
2859 		}
2860 
2861 		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2862 
2863 		/* Setup the new bdp */
2864 		gfar_new_rxbdp(rx_queue, bdp, newskb);
2865 
2866 		/* Update to the next pointer */
2867 		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2868 
2869 		/* update to point at the next skb */
2870 		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2871 				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2872 	}
2873 
2874 	/* Update the current rxbd pointer to be the next one */
2875 	rx_queue->cur_rx = bdp;
2876 
2877 	return howmany;
2878 }
2879 
2880 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2881 {
2882 	struct gfar_priv_grp *gfargrp =
2883 		container_of(napi, struct gfar_priv_grp, napi_rx);
2884 	struct gfar __iomem *regs = gfargrp->regs;
2885 	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2886 	int work_done = 0;
2887 
2888 	/* Clear IEVENT, so interrupts aren't called again
2889 	 * because of the packets that have already arrived
2890 	 */
2891 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2892 
2893 	work_done = gfar_clean_rx_ring(rx_queue, budget);
2894 
2895 	if (work_done < budget) {
2896 		u32 imask;
2897 		napi_complete(napi);
2898 		/* Clear the halt bit in RSTAT */
2899 		gfar_write(&regs->rstat, gfargrp->rstat);
2900 
2901 		spin_lock_irq(&gfargrp->grplock);
2902 		imask = gfar_read(&regs->imask);
2903 		imask |= IMASK_RX_DEFAULT;
2904 		gfar_write(&regs->imask, imask);
2905 		spin_unlock_irq(&gfargrp->grplock);
2906 	}
2907 
2908 	return work_done;
2909 }
2910 
2911 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2912 {
2913 	struct gfar_priv_grp *gfargrp =
2914 		container_of(napi, struct gfar_priv_grp, napi_tx);
2915 	struct gfar __iomem *regs = gfargrp->regs;
2916 	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2917 	u32 imask;
2918 
2919 	/* Clear IEVENT, so interrupts aren't called again
2920 	 * because of frames that have already been transmitted
2921 	 */
2922 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2923 
2924 	/* run Tx cleanup to completion */
2925 	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2926 		gfar_clean_tx_ring(tx_queue);
2927 
2928 	napi_complete(napi);
2929 
2930 	spin_lock_irq(&gfargrp->grplock);
2931 	imask = gfar_read(&regs->imask);
2932 	imask |= IMASK_TX_DEFAULT;
2933 	gfar_write(&regs->imask, imask);
2934 	spin_unlock_irq(&gfargrp->grplock);
2935 
2936 	return 0;
2937 }
2938 
2939 static int gfar_poll_rx(struct napi_struct *napi, int budget)
2940 {
2941 	struct gfar_priv_grp *gfargrp =
2942 		container_of(napi, struct gfar_priv_grp, napi_rx);
2943 	struct gfar_private *priv = gfargrp->priv;
2944 	struct gfar __iomem *regs = gfargrp->regs;
2945 	struct gfar_priv_rx_q *rx_queue = NULL;
2946 	int work_done = 0, work_done_per_q = 0;
2947 	int i, budget_per_q = 0;
2948 	unsigned long rstat_rxf;
2949 	int num_act_queues;
2950 
2951 	/* Clear IEVENT, so interrupts aren't called again
2952 	 * because of the packets that have already arrived
2953 	 */
2954 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2955 
2956 	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2957 
2958 	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2959 	if (num_act_queues)
2960 		budget_per_q = budget / num_act_queues;
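	/* e.g. a budget of 64 shared by two active queues gives each
	 * queue a per-queue budget of 32
	 */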
2961 
2962 	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2963 		/* skip queue if not active */
2964 		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2965 			continue;
2966 
2967 		rx_queue = priv->rx_queue[i];
2968 		work_done_per_q =
2969 			gfar_clean_rx_ring(rx_queue, budget_per_q);
2970 		work_done += work_done_per_q;
2971 
2972 		/* finished processing this queue */
2973 		if (work_done_per_q < budget_per_q) {
2974 			/* clear active queue hw indication */
2975 			gfar_write(&regs->rstat,
2976 				   RSTAT_CLEAR_RXF0 >> i);
2977 			num_act_queues--;
2978 
2979 			if (!num_act_queues)
2980 				break;
2981 		}
2982 	}
2983 
2984 	if (!num_act_queues) {
2985 		u32 imask;
2986 		napi_complete(napi);
2987 
2988 		/* Clear the halt bit in RSTAT */
2989 		gfar_write(&regs->rstat, gfargrp->rstat);
2990 
2991 		spin_lock_irq(&gfargrp->grplock);
2992 		imask = gfar_read(&regs->imask);
2993 		imask |= IMASK_RX_DEFAULT;
2994 		gfar_write(&regs->imask, imask);
2995 		spin_unlock_irq(&gfargrp->grplock);
2996 	}
2997 
2998 	return work_done;
2999 }
3000 
3001 static int gfar_poll_tx(struct napi_struct *napi, int budget)
3002 {
3003 	struct gfar_priv_grp *gfargrp =
3004 		container_of(napi, struct gfar_priv_grp, napi_tx);
3005 	struct gfar_private *priv = gfargrp->priv;
3006 	struct gfar __iomem *regs = gfargrp->regs;
3007 	struct gfar_priv_tx_q *tx_queue = NULL;
3008 	int has_tx_work = 0;
3009 	int i;
3010 
3011 	/* Clear IEVENT, so interrupts aren't called again
3012 	 * because of frames that have already been transmitted
3013 	 */
3014 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
3015 
3016 	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3017 		tx_queue = priv->tx_queue[i];
3018 		/* run Tx cleanup to completion */
3019 		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3020 			gfar_clean_tx_ring(tx_queue);
3021 			has_tx_work = 1;
3022 		}
3023 	}
3024 
3025 	if (!has_tx_work) {
3026 		u32 imask;
3027 		napi_complete(napi);
3028 
3029 		spin_lock_irq(&gfargrp->grplock);
3030 		imask = gfar_read(&regs->imask);
3031 		imask |= IMASK_TX_DEFAULT;
3032 		gfar_write(&regs->imask, imask);
3033 		spin_unlock_irq(&gfargrp->grplock);
3034 	}
3035 
3036 	return 0;
3037 }
3038 
3039 
3040 #ifdef CONFIG_NET_POLL_CONTROLLER
3041 /* Polling 'interrupt' - used by things like netconsole to send skbs
3042  * without having to re-enable interrupts. It's not called while
3043  * the interrupt routine is executing.
3044  */
3045 static void gfar_netpoll(struct net_device *dev)
3046 {
3047 	struct gfar_private *priv = netdev_priv(dev);
3048 	int i;
3049 
3050 	/* If the device has multiple interrupts, run tx/rx */
3051 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3052 		for (i = 0; i < priv->num_grps; i++) {
3053 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3054 
3055 			disable_irq(gfar_irq(grp, TX)->irq);
3056 			disable_irq(gfar_irq(grp, RX)->irq);
3057 			disable_irq(gfar_irq(grp, ER)->irq);
3058 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3059 			enable_irq(gfar_irq(grp, ER)->irq);
3060 			enable_irq(gfar_irq(grp, RX)->irq);
3061 			enable_irq(gfar_irq(grp, TX)->irq);
3062 		}
3063 	} else {
3064 		for (i = 0; i < priv->num_grps; i++) {
3065 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3066 
3067 			disable_irq(gfar_irq(grp, TX)->irq);
3068 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3069 			enable_irq(gfar_irq(grp, TX)->irq);
3070 		}
3071 	}
3072 }
3073 #endif
3074 
3075 /* The interrupt handler for devices with one interrupt */
3076 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3077 {
3078 	struct gfar_priv_grp *gfargrp = grp_id;
3079 
3080 	/* Save ievent for future reference */
3081 	u32 events = gfar_read(&gfargrp->regs->ievent);
3082 
3083 	/* Check for reception */
3084 	if (events & IEVENT_RX_MASK)
3085 		gfar_receive(irq, grp_id);
3086 
3087 	/* Check for transmit completion */
3088 	if (events & IEVENT_TX_MASK)
3089 		gfar_transmit(irq, grp_id);
3090 
3091 	/* Check for errors */
3092 	if (events & IEVENT_ERR_MASK)
3093 		gfar_error(irq, grp_id);
3094 
3095 	return IRQ_HANDLED;
3096 }
3097 
3098 /* Called every time the controller might need to be made
3099  * aware of new link state.  The PHY code conveys this
3100  * information through variables in the phydev structure, and this
3101  * function converts those variables into the appropriate
3102  * register values, and can bring down the device if needed.
3103  */
3104 static void adjust_link(struct net_device *dev)
3105 {
3106 	struct gfar_private *priv = netdev_priv(dev);
3107 	struct phy_device *phydev = priv->phydev;
3108 
3109 	if (unlikely(phydev->link != priv->oldlink ||
3110 		     phydev->duplex != priv->oldduplex ||
3111 		     phydev->speed != priv->oldspeed))
3112 		gfar_update_link_state(priv);
3113 }
3114 
3115 /* Update the hash table based on the current list of multicast
3116  * addresses we subscribe to.  Also, change the promiscuity of
3117  * the device based on the flags (this function is called
3118  * whenever dev->flags is changed
3119  * whenever dev->flags is changed)
3120 static void gfar_set_multi(struct net_device *dev)
3121 {
3122 	struct netdev_hw_addr *ha;
3123 	struct gfar_private *priv = netdev_priv(dev);
3124 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3125 	u32 tempval;
3126 
3127 	if (dev->flags & IFF_PROMISC) {
3128 		/* Set RCTRL to PROM */
3129 		tempval = gfar_read(&regs->rctrl);
3130 		tempval |= RCTRL_PROM;
3131 		gfar_write(&regs->rctrl, tempval);
3132 	} else {
3133 		/* Set RCTRL to not PROM */
3134 		tempval = gfar_read(&regs->rctrl);
3135 		tempval &= ~(RCTRL_PROM);
3136 		gfar_write(&regs->rctrl, tempval);
3137 	}
3138 
3139 	if (dev->flags & IFF_ALLMULTI) {
3140 		/* Set the hash to rx all multicast frames */
3141 		gfar_write(&regs->igaddr0, 0xffffffff);
3142 		gfar_write(&regs->igaddr1, 0xffffffff);
3143 		gfar_write(&regs->igaddr2, 0xffffffff);
3144 		gfar_write(&regs->igaddr3, 0xffffffff);
3145 		gfar_write(&regs->igaddr4, 0xffffffff);
3146 		gfar_write(&regs->igaddr5, 0xffffffff);
3147 		gfar_write(&regs->igaddr6, 0xffffffff);
3148 		gfar_write(&regs->igaddr7, 0xffffffff);
3149 		gfar_write(&regs->gaddr0, 0xffffffff);
3150 		gfar_write(&regs->gaddr1, 0xffffffff);
3151 		gfar_write(&regs->gaddr2, 0xffffffff);
3152 		gfar_write(&regs->gaddr3, 0xffffffff);
3153 		gfar_write(&regs->gaddr4, 0xffffffff);
3154 		gfar_write(&regs->gaddr5, 0xffffffff);
3155 		gfar_write(&regs->gaddr6, 0xffffffff);
3156 		gfar_write(&regs->gaddr7, 0xffffffff);
3157 	} else {
3158 		int em_num;
3159 		int idx;
3160 
3161 		/* zero out the hash */
3162 		gfar_write(&regs->igaddr0, 0x0);
3163 		gfar_write(&regs->igaddr1, 0x0);
3164 		gfar_write(&regs->igaddr2, 0x0);
3165 		gfar_write(&regs->igaddr3, 0x0);
3166 		gfar_write(&regs->igaddr4, 0x0);
3167 		gfar_write(&regs->igaddr5, 0x0);
3168 		gfar_write(&regs->igaddr6, 0x0);
3169 		gfar_write(&regs->igaddr7, 0x0);
3170 		gfar_write(&regs->gaddr0, 0x0);
3171 		gfar_write(&regs->gaddr1, 0x0);
3172 		gfar_write(&regs->gaddr2, 0x0);
3173 		gfar_write(&regs->gaddr3, 0x0);
3174 		gfar_write(&regs->gaddr4, 0x0);
3175 		gfar_write(&regs->gaddr5, 0x0);
3176 		gfar_write(&regs->gaddr6, 0x0);
3177 		gfar_write(&regs->gaddr7, 0x0);
3178 
3179 		/* If we have extended hash tables, we need to
3180 		 * clear the exact match registers to prepare for
3181 		 * setting them
3182 		 */
3183 		if (priv->extended_hash) {
3184 			em_num = GFAR_EM_NUM + 1;
3185 			gfar_clear_exact_match(dev);
3186 			idx = 1;
3187 		} else {
3188 			idx = 0;
3189 			em_num = 0;
3190 		}
3191 
3192 		if (netdev_mc_empty(dev))
3193 			return;
3194 
3195 		/* Parse the list, and set the appropriate bits */
3196 		netdev_for_each_mc_addr(ha, dev) {
3197 			if (idx < em_num) {
3198 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3199 				idx++;
3200 			} else
3201 				gfar_set_hash_for_addr(dev, ha->addr);
3202 		}
3203 	}
3204 }
3205 
3206 
3207 /* Clears each of the exact match registers to zero, so they
3208  * don't interfere with normal reception
3209  */
3210 static void gfar_clear_exact_match(struct net_device *dev)
3211 {
3212 	int idx;
3213 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3214 
3215 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3216 		gfar_set_mac_for_addr(dev, idx, zero_arr);
3217 }
3218 
3219 /* Set the appropriate hash bit for the given addr */
3220 /* The algorithm works like so:
3221  * 1) Take the Destination Address (ie the multicast address), and
3222  * do a CRC on it (little endian), and reverse the bits of the
3223  * result.
3224  * 2) Use the 8 most significant bits as a hash into a 256-entry
3225  * table.  The table is controlled through 8 32-bit registers:
3226  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3227  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3228  * entry 255.  This means that the 3 most significant bits of the
3229  * hash index select which gaddr register to use, and the 5 other bits
3230  * indicate which bit (assuming an IBM numbering scheme, which
3231  * for PowerPC (tm) is usually the case) in the register holds
3232  * the entry.
 */
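/* Worked example (assuming a hash width of 8, i.e. eight hash registers):
 * if ether_crc() returns 0xB6000000, the top byte is 0xB6 = 0b10110110,
 * so whichreg = 0b101 = 5 and whichbit = 0b10110 = 22, and bit
 * (31 - 22) = 9 of hash_regs[5] is set.
 */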
3233 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3234 {
3235 	u32 tempval;
3236 	struct gfar_private *priv = netdev_priv(dev);
3237 	u32 result = ether_crc(ETH_ALEN, addr);
3238 	int width = priv->hash_width;
3239 	u8 whichbit = (result >> (32 - width)) & 0x1f;
3240 	u8 whichreg = result >> (32 - width + 5);
3241 	u32 value = (1 << (31-whichbit));
3242 
3243 	tempval = gfar_read(priv->hash_regs[whichreg]);
3244 	tempval |= value;
3245 	gfar_write(priv->hash_regs[whichreg], tempval);
3246 }
3247 
3248 
3249 /* There are multiple MAC Address register pairs on some controllers
3250  * This function sets the numth pair to a given address
3251  */
3252 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3253 				  const u8 *addr)
3254 {
3255 	struct gfar_private *priv = netdev_priv(dev);
3256 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3257 	u32 tempval;
3258 	u32 __iomem *macptr = &regs->macstnaddr1;
3259 
3260 	macptr += num*2;
3261 
3262 	/* For a station address of 0x12345678ABCD in transmission
3263 	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3264 	 * MACnADDR2 is set to 0x34120000.
3265 	 */
3266 	tempval = (addr[5] << 24) | (addr[4] << 16) |
3267 		  (addr[3] << 8)  |  addr[2];
3268 
3269 	gfar_write(macptr, tempval);
3270 
3271 	tempval = (addr[1] << 24) | (addr[0] << 16);
3272 
3273 	gfar_write(macptr+1, tempval);
3274 }
3275 
3276 /* GFAR error interrupt handler */
3277 static irqreturn_t gfar_error(int irq, void *grp_id)
3278 {
3279 	struct gfar_priv_grp *gfargrp = grp_id;
3280 	struct gfar __iomem *regs = gfargrp->regs;
3281 	struct gfar_private *priv = gfargrp->priv;
3282 	struct net_device *dev = priv->ndev;
3283 
3284 	/* Save ievent for future reference */
3285 	u32 events = gfar_read(&regs->ievent);
3286 
3287 	/* Clear IEVENT */
3288 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3289 
3290 	/* Magic Packet is not an error. */
3291 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3292 	    (events & IEVENT_MAG))
3293 		events &= ~IEVENT_MAG;
3294 
3295 	/* Log the error details when Rx/Tx error messages are enabled */
3296 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3297 		netdev_dbg(dev,
3298 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3299 			   events, gfar_read(&regs->imask));
3300 
3301 	/* Update the error counters */
3302 	if (events & IEVENT_TXE) {
3303 		dev->stats.tx_errors++;
3304 
3305 		if (events & IEVENT_LC)
3306 			dev->stats.tx_window_errors++;
3307 		if (events & IEVENT_CRL)
3308 			dev->stats.tx_aborted_errors++;
3309 		if (events & IEVENT_XFUN) {
3310 			unsigned long flags;
3311 
3312 			netif_dbg(priv, tx_err, dev,
3313 				  "TX FIFO underrun, packet dropped\n");
3314 			dev->stats.tx_dropped++;
3315 			atomic64_inc(&priv->extra_stats.tx_underrun);
3316 
3317 			local_irq_save(flags);
3318 			lock_tx_qs(priv);
3319 
3320 			/* Reactivate the Tx Queues */
3321 			gfar_write(&regs->tstat, gfargrp->tstat);
3322 
3323 			unlock_tx_qs(priv);
3324 			local_irq_restore(flags);
3325 		}
3326 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3327 	}
3328 	if (events & IEVENT_BSY) {
3329 		dev->stats.rx_errors++;
3330 		atomic64_inc(&priv->extra_stats.rx_bsy);
3331 
3332 		gfar_receive(irq, grp_id);
3333 
3334 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3335 			  gfar_read(&regs->rstat));
3336 	}
3337 	if (events & IEVENT_BABR) {
3338 		dev->stats.rx_errors++;
3339 		atomic64_inc(&priv->extra_stats.rx_babr);
3340 
3341 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3342 	}
3343 	if (events & IEVENT_EBERR) {
3344 		atomic64_inc(&priv->extra_stats.eberr);
3345 		netif_dbg(priv, rx_err, dev, "bus error\n");
3346 	}
3347 	if (events & IEVENT_RXC)
3348 		netif_dbg(priv, rx_status, dev, "control frame\n");
3349 
3350 	if (events & IEVENT_BABT) {
3351 		atomic64_inc(&priv->extra_stats.tx_babt);
3352 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3353 	}
3354 	return IRQ_HANDLED;
3355 }
3356 
3357 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3358 {
3359 	struct phy_device *phydev = priv->phydev;
3360 	u32 val = 0;
3361 
3362 	if (!phydev->duplex)
3363 		return val;
3364 
3365 	if (!priv->pause_aneg_en) {
3366 		if (priv->tx_pause_en)
3367 			val |= MACCFG1_TX_FLOW;
3368 		if (priv->rx_pause_en)
3369 			val |= MACCFG1_RX_FLOW;
3370 	} else {
3371 		u16 lcl_adv, rmt_adv;
3372 		u8 flowctrl;
3373 		/* get link partner capabilities */
3374 		rmt_adv = 0;
3375 		if (phydev->pause)
3376 			rmt_adv = LPA_PAUSE_CAP;
3377 		if (phydev->asym_pause)
3378 			rmt_adv |= LPA_PAUSE_ASYM;
3379 
3380 		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3381 
3382 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3383 		if (flowctrl & FLOW_CTRL_TX)
3384 			val |= MACCFG1_TX_FLOW;
3385 		if (flowctrl & FLOW_CTRL_RX)
3386 			val |= MACCFG1_RX_FLOW;
3387 	}
3388 
3389 	return val;
3390 }
3391 
3392 static noinline void gfar_update_link_state(struct gfar_private *priv)
3393 {
3394 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3395 	struct phy_device *phydev = priv->phydev;
3396 
3397 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3398 		return;
3399 
3400 	if (phydev->link) {
3401 		u32 tempval1 = gfar_read(&regs->maccfg1);
3402 		u32 tempval = gfar_read(&regs->maccfg2);
3403 		u32 ecntrl = gfar_read(&regs->ecntrl);
3404 
3405 		if (phydev->duplex != priv->oldduplex) {
3406 			if (!(phydev->duplex))
3407 				tempval &= ~(MACCFG2_FULL_DUPLEX);
3408 			else
3409 				tempval |= MACCFG2_FULL_DUPLEX;
3410 
3411 			priv->oldduplex = phydev->duplex;
3412 		}
3413 
3414 		if (phydev->speed != priv->oldspeed) {
3415 			switch (phydev->speed) {
3416 			case 1000:
3417 				tempval =
3418 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3419 
3420 				ecntrl &= ~(ECNTRL_R100);
3421 				break;
3422 			case 100:
3423 			case 10:
3424 				tempval =
3425 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3426 
3427 				/* Reduced mode distinguishes
3428 				 * between 10 and 100
3429 				 */
3430 				if (phydev->speed == SPEED_100)
3431 					ecntrl |= ECNTRL_R100;
3432 				else
3433 					ecntrl &= ~(ECNTRL_R100);
3434 				break;
3435 			default:
3436 				netif_warn(priv, link, priv->ndev,
3437 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
3438 					   phydev->speed);
3439 				break;
3440 			}
3441 
3442 			priv->oldspeed = phydev->speed;
3443 		}
3444 
3445 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3446 		tempval1 |= gfar_get_flowctrl_cfg(priv);
3447 
3448 		gfar_write(&regs->maccfg1, tempval1);
3449 		gfar_write(&regs->maccfg2, tempval);
3450 		gfar_write(&regs->ecntrl, ecntrl);
3451 
3452 		if (!priv->oldlink)
3453 			priv->oldlink = 1;
3454 
3455 	} else if (priv->oldlink) {
3456 		priv->oldlink = 0;
3457 		priv->oldspeed = 0;
3458 		priv->oldduplex = -1;
3459 	}
3460 
3461 	if (netif_msg_link(priv))
3462 		phy_print_status(phydev);
3463 }
3464 
3465 static struct of_device_id gfar_match[] = {
3467 	{
3468 		.type = "network",
3469 		.compatible = "gianfar",
3470 	},
3471 	{
3472 		.compatible = "fsl,etsec2",
3473 	},
3474 	{},
3475 };
3476 MODULE_DEVICE_TABLE(of, gfar_match);
3477 
3478 /* Structure for a device driver */
3479 static struct platform_driver gfar_driver = {
3480 	.driver = {
3481 		.name = "fsl-gianfar",
3482 		.owner = THIS_MODULE,
3483 		.pm = GFAR_PM_OPS,
3484 		.of_match_table = gfar_match,
3485 	},
3486 	.probe = gfar_probe,
3487 	.remove = gfar_remove,
3488 };
3489 
3490 module_platform_driver(gfar_driver);
3491