1 /*
2  * Cadence MACB/GEM Ethernet Controller driver
3  *
4  * Copyright (C) 2004-2006 Atmel Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/clk.h>
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/circ_buf.h>
18 #include <linux/slab.h>
19 #include <linux/init.h>
20 #include <linux/io.h>
21 #include <linux/gpio.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/interrupt.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/platform_data/macb.h>
28 #include <linux/platform_device.h>
29 #include <linux/phy.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_gpio.h>
33 #include <linux/of_mdio.h>
34 #include <linux/of_net.h>
35 #include <linux/ip.h>
36 #include <linux/udp.h>
37 #include <linux/tcp.h>
38 #include "macb.h"
39 
40 #define MACB_RX_BUFFER_SIZE	128
41 #define RX_BUFFER_MULTIPLE	64  /* bytes */
42 
43 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
44 #define MIN_RX_RING_SIZE	64
45 #define MAX_RX_RING_SIZE	8192
46 #define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
47 				 * (bp)->rx_ring_size)
48 
49 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
50 #define MIN_TX_RING_SIZE	64
51 #define MAX_TX_RING_SIZE	4096
52 #define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
53 				 * (bp)->tx_ring_size)
54 
55 /* level of occupied TX descriptors under which we wake up TX process */
56 #define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
57 
58 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
59 				 | MACB_BIT(ISR_ROVR))
60 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
61 					| MACB_BIT(ISR_RLE)		\
62 					| MACB_BIT(TXERR))
63 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
64 
65 /* Max length of transmit frame must be a multiple of 8 bytes */
66 #define MACB_TX_LEN_ALIGN	8
67 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
68 #define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
69 
70 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
71 #define MACB_NETIF_LSO		NETIF_F_TSO
72 
73 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
74 #define MACB_WOL_ENABLED		(0x1 << 1)
75 
76 /* Graceful stop timeouts in us. We should allow up to
77  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
78  */
79 #define MACB_HALT_TIMEOUT	1230
80 
81 /* DMA buffer descriptor might be different size
82  * depends on hardware configuration:
83  *
84  * 1. dma address width 32 bits:
85  *    word 1: 32 bit address of Data Buffer
86  *    word 2: control
87  *
88  * 2. dma address width 64 bits:
89  *    word 1: 32 bit address of Data Buffer
90  *    word 2: control
91  *    word 3: upper 32 bit address of Data Buffer
92  *    word 4: unused
93  *
94  * 3. dma address width 32 bits with hardware timestamping:
95  *    word 1: 32 bit address of Data Buffer
96  *    word 2: control
97  *    word 3: timestamp word 1
98  *    word 4: timestamp word 2
99  *
100  * 4. dma address width 64 bits with hardware timestamping:
101  *    word 1: 32 bit address of Data Buffer
102  *    word 2: control
103  *    word 3: upper 32 bit address of Data Buffer
104  *    word 4: unused
105  *    word 5: timestamp word 1
106  *    word 6: timestamp word 2
107  */
108 static unsigned int macb_dma_desc_get_size(struct macb *bp)
109 {
110 #ifdef MACB_EXT_DESC
111 	unsigned int desc_size;
112 
113 	switch (bp->hw_dma_cap) {
114 	case HW_DMA_CAP_64B:
115 		desc_size = sizeof(struct macb_dma_desc)
116 			+ sizeof(struct macb_dma_desc_64);
117 		break;
118 	case HW_DMA_CAP_PTP:
119 		desc_size = sizeof(struct macb_dma_desc)
120 			+ sizeof(struct macb_dma_desc_ptp);
121 		break;
122 	case HW_DMA_CAP_64B_PTP:
123 		desc_size = sizeof(struct macb_dma_desc)
124 			+ sizeof(struct macb_dma_desc_64)
125 			+ sizeof(struct macb_dma_desc_ptp);
126 		break;
127 	default:
128 		desc_size = sizeof(struct macb_dma_desc);
129 	}
130 	return desc_size;
131 #endif
132 	return sizeof(struct macb_dma_desc);
133 }
134 
135 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
136 {
137 #ifdef MACB_EXT_DESC
138 	switch (bp->hw_dma_cap) {
139 	case HW_DMA_CAP_64B:
140 	case HW_DMA_CAP_PTP:
141 		desc_idx <<= 1;
142 		break;
143 	case HW_DMA_CAP_64B_PTP:
144 		desc_idx *= 3;
145 		break;
146 	default:
147 		break;
148 	}
149 #endif
150 	return desc_idx;
151 }
152 
153 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
154 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
155 {
156 	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
157 		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
158 	return NULL;
159 }
160 #endif
161 
162 /* Ring buffer accessors */
163 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
164 {
165 	return index & (bp->tx_ring_size - 1);
166 }
167 
168 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
169 					  unsigned int index)
170 {
171 	index = macb_tx_ring_wrap(queue->bp, index);
172 	index = macb_adj_dma_desc_idx(queue->bp, index);
173 	return &queue->tx_ring[index];
174 }
175 
176 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
177 				       unsigned int index)
178 {
179 	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
180 }
181 
182 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
183 {
184 	dma_addr_t offset;
185 
186 	offset = macb_tx_ring_wrap(queue->bp, index) *
187 			macb_dma_desc_get_size(queue->bp);
188 
189 	return queue->tx_ring_dma + offset;
190 }
191 
192 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
193 {
194 	return index & (bp->rx_ring_size - 1);
195 }
196 
197 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
198 {
199 	index = macb_rx_ring_wrap(queue->bp, index);
200 	index = macb_adj_dma_desc_idx(queue->bp, index);
201 	return &queue->rx_ring[index];
202 }
203 
204 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
205 {
206 	return queue->rx_buffers + queue->bp->rx_buffer_size *
207 	       macb_rx_ring_wrap(queue->bp, index);
208 }
209 
210 /* I/O accessors */
211 static u32 hw_readl_native(struct macb *bp, int offset)
212 {
213 	return __raw_readl(bp->regs + offset);
214 }
215 
216 static void hw_writel_native(struct macb *bp, int offset, u32 value)
217 {
218 	__raw_writel(value, bp->regs + offset);
219 }
220 
221 static u32 hw_readl(struct macb *bp, int offset)
222 {
223 	return readl_relaxed(bp->regs + offset);
224 }
225 
226 static void hw_writel(struct macb *bp, int offset, u32 value)
227 {
228 	writel_relaxed(value, bp->regs + offset);
229 }
230 
231 /* Find the CPU endianness by using the loopback bit of NCR register. When the
232  * CPU is in big endian we need to program swapped mode for management
233  * descriptor access.
234  */
235 static bool hw_is_native_io(void __iomem *addr)
236 {
237 	u32 value = MACB_BIT(LLB);
238 
239 	__raw_writel(value, addr + MACB_NCR);
240 	value = __raw_readl(addr + MACB_NCR);
241 
242 	/* Write 0 back to disable everything */
243 	__raw_writel(0, addr + MACB_NCR);
244 
245 	return value == MACB_BIT(LLB);
246 }
247 
248 static bool hw_is_gem(void __iomem *addr, bool native_io)
249 {
250 	u32 id;
251 
252 	if (native_io)
253 		id = __raw_readl(addr + MACB_MID);
254 	else
255 		id = readl_relaxed(addr + MACB_MID);
256 
257 	return MACB_BFEXT(IDNUM, id) >= 0x2;
258 }
259 
260 static void macb_set_hwaddr(struct macb *bp)
261 {
262 	u32 bottom;
263 	u16 top;
264 
265 	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
266 	macb_or_gem_writel(bp, SA1B, bottom);
267 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
268 	macb_or_gem_writel(bp, SA1T, top);
269 
270 	/* Clear unused address register sets */
271 	macb_or_gem_writel(bp, SA2B, 0);
272 	macb_or_gem_writel(bp, SA2T, 0);
273 	macb_or_gem_writel(bp, SA3B, 0);
274 	macb_or_gem_writel(bp, SA3T, 0);
275 	macb_or_gem_writel(bp, SA4B, 0);
276 	macb_or_gem_writel(bp, SA4T, 0);
277 }
278 
279 static void macb_get_hwaddr(struct macb *bp)
280 {
281 	struct macb_platform_data *pdata;
282 	u32 bottom;
283 	u16 top;
284 	u8 addr[6];
285 	int i;
286 
287 	pdata = dev_get_platdata(&bp->pdev->dev);
288 
289 	/* Check all 4 address register for valid address */
290 	for (i = 0; i < 4; i++) {
291 		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
292 		top = macb_or_gem_readl(bp, SA1T + i * 8);
293 
294 		if (pdata && pdata->rev_eth_addr) {
295 			addr[5] = bottom & 0xff;
296 			addr[4] = (bottom >> 8) & 0xff;
297 			addr[3] = (bottom >> 16) & 0xff;
298 			addr[2] = (bottom >> 24) & 0xff;
299 			addr[1] = top & 0xff;
300 			addr[0] = (top & 0xff00) >> 8;
301 		} else {
302 			addr[0] = bottom & 0xff;
303 			addr[1] = (bottom >> 8) & 0xff;
304 			addr[2] = (bottom >> 16) & 0xff;
305 			addr[3] = (bottom >> 24) & 0xff;
306 			addr[4] = top & 0xff;
307 			addr[5] = (top >> 8) & 0xff;
308 		}
309 
310 		if (is_valid_ether_addr(addr)) {
311 			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
312 			return;
313 		}
314 	}
315 
316 	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
317 	eth_hw_addr_random(bp->dev);
318 }
319 
320 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
321 {
322 	struct macb *bp = bus->priv;
323 	int value;
324 
325 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
326 			      | MACB_BF(RW, MACB_MAN_READ)
327 			      | MACB_BF(PHYA, mii_id)
328 			      | MACB_BF(REGA, regnum)
329 			      | MACB_BF(CODE, MACB_MAN_CODE)));
330 
331 	/* wait for end of transfer */
332 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
333 		cpu_relax();
334 
335 	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
336 
337 	return value;
338 }
339 
340 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
341 			   u16 value)
342 {
343 	struct macb *bp = bus->priv;
344 
345 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
346 			      | MACB_BF(RW, MACB_MAN_WRITE)
347 			      | MACB_BF(PHYA, mii_id)
348 			      | MACB_BF(REGA, regnum)
349 			      | MACB_BF(CODE, MACB_MAN_CODE)
350 			      | MACB_BF(DATA, value)));
351 
352 	/* wait for end of transfer */
353 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
354 		cpu_relax();
355 
356 	return 0;
357 }
358 
359 /**
360  * macb_set_tx_clk() - Set a clock to a new frequency
361  * @clk		Pointer to the clock to change
362  * @rate	New frequency in Hz
363  * @dev		Pointer to the struct net_device
364  */
365 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
366 {
367 	long ferr, rate, rate_rounded;
368 
369 	if (!clk)
370 		return;
371 
372 	switch (speed) {
373 	case SPEED_10:
374 		rate = 2500000;
375 		break;
376 	case SPEED_100:
377 		rate = 25000000;
378 		break;
379 	case SPEED_1000:
380 		rate = 125000000;
381 		break;
382 	default:
383 		return;
384 	}
385 
386 	rate_rounded = clk_round_rate(clk, rate);
387 	if (rate_rounded < 0)
388 		return;
389 
390 	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
391 	 * is not satisfied.
392 	 */
393 	ferr = abs(rate_rounded - rate);
394 	ferr = DIV_ROUND_UP(ferr, rate / 100000);
395 	if (ferr > 5)
396 		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
397 			    rate);
398 
399 	if (clk_set_rate(clk, rate_rounded))
400 		netdev_err(dev, "adjusting tx_clk failed.\n");
401 }
402 
403 static void macb_handle_link_change(struct net_device *dev)
404 {
405 	struct macb *bp = netdev_priv(dev);
406 	struct phy_device *phydev = dev->phydev;
407 	unsigned long flags;
408 	int status_change = 0;
409 
410 	spin_lock_irqsave(&bp->lock, flags);
411 
412 	if (phydev->link) {
413 		if ((bp->speed != phydev->speed) ||
414 		    (bp->duplex != phydev->duplex)) {
415 			u32 reg;
416 
417 			reg = macb_readl(bp, NCFGR);
418 			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
419 			if (macb_is_gem(bp))
420 				reg &= ~GEM_BIT(GBE);
421 
422 			if (phydev->duplex)
423 				reg |= MACB_BIT(FD);
424 			if (phydev->speed == SPEED_100)
425 				reg |= MACB_BIT(SPD);
426 			if (phydev->speed == SPEED_1000 &&
427 			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
428 				reg |= GEM_BIT(GBE);
429 
430 			macb_or_gem_writel(bp, NCFGR, reg);
431 
432 			bp->speed = phydev->speed;
433 			bp->duplex = phydev->duplex;
434 			status_change = 1;
435 		}
436 	}
437 
438 	if (phydev->link != bp->link) {
439 		if (!phydev->link) {
440 			bp->speed = 0;
441 			bp->duplex = -1;
442 		}
443 		bp->link = phydev->link;
444 
445 		status_change = 1;
446 	}
447 
448 	spin_unlock_irqrestore(&bp->lock, flags);
449 
450 	if (status_change) {
451 		if (phydev->link) {
452 			/* Update the TX clock rate if and only if the link is
453 			 * up and there has been a link change.
454 			 */
455 			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
456 
457 			netif_carrier_on(dev);
458 			netdev_info(dev, "link up (%d/%s)\n",
459 				    phydev->speed,
460 				    phydev->duplex == DUPLEX_FULL ?
461 				    "Full" : "Half");
462 		} else {
463 			netif_carrier_off(dev);
464 			netdev_info(dev, "link down\n");
465 		}
466 	}
467 }
468 
469 /* based on au1000_eth. c*/
470 static int macb_mii_probe(struct net_device *dev)
471 {
472 	struct macb *bp = netdev_priv(dev);
473 	struct macb_platform_data *pdata;
474 	struct phy_device *phydev;
475 	struct device_node *np;
476 	int phy_irq, ret, i;
477 
478 	pdata = dev_get_platdata(&bp->pdev->dev);
479 	np = bp->pdev->dev.of_node;
480 	ret = 0;
481 
482 	if (np) {
483 		if (of_phy_is_fixed_link(np)) {
484 			if (of_phy_register_fixed_link(np) < 0) {
485 				dev_err(&bp->pdev->dev,
486 					"broken fixed-link specification\n");
487 				return -ENODEV;
488 			}
489 			bp->phy_node = of_node_get(np);
490 		} else {
491 			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
492 			/* fallback to standard phy registration if no
493 			 * phy-handle was found nor any phy found during
494 			 * dt phy registration
495 			 */
496 			if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
497 				for (i = 0; i < PHY_MAX_ADDR; i++) {
498 					struct phy_device *phydev;
499 
500 					phydev = mdiobus_scan(bp->mii_bus, i);
501 					if (IS_ERR(phydev) &&
502 					    PTR_ERR(phydev) != -ENODEV) {
503 						ret = PTR_ERR(phydev);
504 						break;
505 					}
506 				}
507 
508 				if (ret)
509 					return -ENODEV;
510 			}
511 		}
512 	}
513 
514 	if (bp->phy_node) {
515 		phydev = of_phy_connect(dev, bp->phy_node,
516 					&macb_handle_link_change, 0,
517 					bp->phy_interface);
518 		if (!phydev)
519 			return -ENODEV;
520 	} else {
521 		phydev = phy_find_first(bp->mii_bus);
522 		if (!phydev) {
523 			netdev_err(dev, "no PHY found\n");
524 			return -ENXIO;
525 		}
526 
527 		if (pdata) {
528 			if (gpio_is_valid(pdata->phy_irq_pin)) {
529 				ret = devm_gpio_request(&bp->pdev->dev,
530 							pdata->phy_irq_pin, "phy int");
531 				if (!ret) {
532 					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
533 					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
534 				}
535 			} else {
536 				phydev->irq = PHY_POLL;
537 			}
538 		}
539 
540 		/* attach the mac to the phy */
541 		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
542 					 bp->phy_interface);
543 		if (ret) {
544 			netdev_err(dev, "Could not attach to PHY\n");
545 			return ret;
546 		}
547 	}
548 
549 	/* mask with MAC supported features */
550 	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
551 		phydev->supported &= PHY_GBIT_FEATURES;
552 	else
553 		phydev->supported &= PHY_BASIC_FEATURES;
554 
555 	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
556 		phydev->supported &= ~SUPPORTED_1000baseT_Half;
557 
558 	phydev->advertising = phydev->supported;
559 
560 	bp->link = 0;
561 	bp->speed = 0;
562 	bp->duplex = -1;
563 
564 	return 0;
565 }
566 
567 static int macb_mii_init(struct macb *bp)
568 {
569 	struct macb_platform_data *pdata;
570 	struct device_node *np;
571 	int err;
572 
573 	/* Enable management port */
574 	macb_writel(bp, NCR, MACB_BIT(MPE));
575 
576 	bp->mii_bus = mdiobus_alloc();
577 	if (!bp->mii_bus) {
578 		err = -ENOMEM;
579 		goto err_out;
580 	}
581 
582 	bp->mii_bus->name = "MACB_mii_bus";
583 	bp->mii_bus->read = &macb_mdio_read;
584 	bp->mii_bus->write = &macb_mdio_write;
585 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
586 		 bp->pdev->name, bp->pdev->id);
587 	bp->mii_bus->priv = bp;
588 	bp->mii_bus->parent = &bp->pdev->dev;
589 	pdata = dev_get_platdata(&bp->pdev->dev);
590 
591 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
592 
593 	np = bp->pdev->dev.of_node;
594 	if (pdata)
595 		bp->mii_bus->phy_mask = pdata->phy_mask;
596 
597 	err = of_mdiobus_register(bp->mii_bus, np);
598 	if (err)
599 		goto err_out_free_mdiobus;
600 
601 	err = macb_mii_probe(bp->dev);
602 	if (err)
603 		goto err_out_unregister_bus;
604 
605 	return 0;
606 
607 err_out_unregister_bus:
608 	mdiobus_unregister(bp->mii_bus);
609 	if (np && of_phy_is_fixed_link(np))
610 		of_phy_deregister_fixed_link(np);
611 err_out_free_mdiobus:
612 	of_node_put(bp->phy_node);
613 	mdiobus_free(bp->mii_bus);
614 err_out:
615 	return err;
616 }
617 
618 static void macb_update_stats(struct macb *bp)
619 {
620 	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
621 	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
622 	int offset = MACB_PFR;
623 
624 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
625 
626 	for (; p < end; p++, offset += 4)
627 		*p += bp->macb_reg_readl(bp, offset);
628 }
629 
630 static int macb_halt_tx(struct macb *bp)
631 {
632 	unsigned long	halt_time, timeout;
633 	u32		status;
634 
635 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
636 
637 	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
638 	do {
639 		halt_time = jiffies;
640 		status = macb_readl(bp, TSR);
641 		if (!(status & MACB_BIT(TGO)))
642 			return 0;
643 
644 		usleep_range(10, 250);
645 	} while (time_before(halt_time, timeout));
646 
647 	return -ETIMEDOUT;
648 }
649 
650 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
651 {
652 	if (tx_skb->mapping) {
653 		if (tx_skb->mapped_as_page)
654 			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
655 				       tx_skb->size, DMA_TO_DEVICE);
656 		else
657 			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
658 					 tx_skb->size, DMA_TO_DEVICE);
659 		tx_skb->mapping = 0;
660 	}
661 
662 	if (tx_skb->skb) {
663 		dev_kfree_skb_any(tx_skb->skb);
664 		tx_skb->skb = NULL;
665 	}
666 }
667 
668 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
669 {
670 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
671 	struct macb_dma_desc_64 *desc_64;
672 
673 	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
674 		desc_64 = macb_64b_desc(bp, desc);
675 		desc_64->addrh = upper_32_bits(addr);
676 	}
677 #endif
678 	desc->addr = lower_32_bits(addr);
679 }
680 
681 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
682 {
683 	dma_addr_t addr = 0;
684 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
685 	struct macb_dma_desc_64 *desc_64;
686 
687 	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
688 		desc_64 = macb_64b_desc(bp, desc);
689 		addr = ((u64)(desc_64->addrh) << 32);
690 	}
691 #endif
692 	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
693 	return addr;
694 }
695 
696 static void macb_tx_error_task(struct work_struct *work)
697 {
698 	struct macb_queue	*queue = container_of(work, struct macb_queue,
699 						      tx_error_task);
700 	struct macb		*bp = queue->bp;
701 	struct macb_tx_skb	*tx_skb;
702 	struct macb_dma_desc	*desc;
703 	struct sk_buff		*skb;
704 	unsigned int		tail;
705 	unsigned long		flags;
706 
707 	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
708 		    (unsigned int)(queue - bp->queues),
709 		    queue->tx_tail, queue->tx_head);
710 
711 	/* Prevent the queue IRQ handlers from running: each of them may call
712 	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
713 	 * As explained below, we have to halt the transmission before updating
714 	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
715 	 * network engine about the macb/gem being halted.
716 	 */
717 	spin_lock_irqsave(&bp->lock, flags);
718 
719 	/* Make sure nobody is trying to queue up new packets */
720 	netif_tx_stop_all_queues(bp->dev);
721 
722 	/* Stop transmission now
723 	 * (in case we have just queued new packets)
724 	 * macb/gem must be halted to write TBQP register
725 	 */
726 	if (macb_halt_tx(bp))
727 		/* Just complain for now, reinitializing TX path can be good */
728 		netdev_err(bp->dev, "BUG: halt tx timed out\n");
729 
730 	/* Treat frames in TX queue including the ones that caused the error.
731 	 * Free transmit buffers in upper layer.
732 	 */
733 	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
734 		u32	ctrl;
735 
736 		desc = macb_tx_desc(queue, tail);
737 		ctrl = desc->ctrl;
738 		tx_skb = macb_tx_skb(queue, tail);
739 		skb = tx_skb->skb;
740 
741 		if (ctrl & MACB_BIT(TX_USED)) {
742 			/* skb is set for the last buffer of the frame */
743 			while (!skb) {
744 				macb_tx_unmap(bp, tx_skb);
745 				tail++;
746 				tx_skb = macb_tx_skb(queue, tail);
747 				skb = tx_skb->skb;
748 			}
749 
750 			/* ctrl still refers to the first buffer descriptor
751 			 * since it's the only one written back by the hardware
752 			 */
753 			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
754 				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
755 					    macb_tx_ring_wrap(bp, tail),
756 					    skb->data);
757 				bp->dev->stats.tx_packets++;
758 				queue->stats.tx_packets++;
759 				bp->dev->stats.tx_bytes += skb->len;
760 				queue->stats.tx_bytes += skb->len;
761 			}
762 		} else {
763 			/* "Buffers exhausted mid-frame" errors may only happen
764 			 * if the driver is buggy, so complain loudly about
765 			 * those. Statistics are updated by hardware.
766 			 */
767 			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
768 				netdev_err(bp->dev,
769 					   "BUG: TX buffers exhausted mid-frame\n");
770 
771 			desc->ctrl = ctrl | MACB_BIT(TX_USED);
772 		}
773 
774 		macb_tx_unmap(bp, tx_skb);
775 	}
776 
777 	/* Set end of TX queue */
778 	desc = macb_tx_desc(queue, 0);
779 	macb_set_addr(bp, desc, 0);
780 	desc->ctrl = MACB_BIT(TX_USED);
781 
782 	/* Make descriptor updates visible to hardware */
783 	wmb();
784 
785 	/* Reinitialize the TX desc queue */
786 	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
787 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
788 	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
789 		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
790 #endif
791 	/* Make TX ring reflect state of hardware */
792 	queue->tx_head = 0;
793 	queue->tx_tail = 0;
794 
795 	/* Housework before enabling TX IRQ */
796 	macb_writel(bp, TSR, macb_readl(bp, TSR));
797 	queue_writel(queue, IER, MACB_TX_INT_FLAGS);
798 
799 	/* Now we are ready to start transmission again */
800 	netif_tx_start_all_queues(bp->dev);
801 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
802 
803 	spin_unlock_irqrestore(&bp->lock, flags);
804 }
805 
806 static void macb_tx_interrupt(struct macb_queue *queue)
807 {
808 	unsigned int tail;
809 	unsigned int head;
810 	u32 status;
811 	struct macb *bp = queue->bp;
812 	u16 queue_index = queue - bp->queues;
813 
814 	status = macb_readl(bp, TSR);
815 	macb_writel(bp, TSR, status);
816 
817 	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
818 		queue_writel(queue, ISR, MACB_BIT(TCOMP));
819 
820 	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
821 		    (unsigned long)status);
822 
823 	head = queue->tx_head;
824 	for (tail = queue->tx_tail; tail != head; tail++) {
825 		struct macb_tx_skb	*tx_skb;
826 		struct sk_buff		*skb;
827 		struct macb_dma_desc	*desc;
828 		u32			ctrl;
829 
830 		desc = macb_tx_desc(queue, tail);
831 
832 		/* Make hw descriptor updates visible to CPU */
833 		rmb();
834 
835 		ctrl = desc->ctrl;
836 
837 		/* TX_USED bit is only set by hardware on the very first buffer
838 		 * descriptor of the transmitted frame.
839 		 */
840 		if (!(ctrl & MACB_BIT(TX_USED)))
841 			break;
842 
843 		/* Process all buffers of the current transmitted frame */
844 		for (;; tail++) {
845 			tx_skb = macb_tx_skb(queue, tail);
846 			skb = tx_skb->skb;
847 
848 			/* First, update TX stats if needed */
849 			if (skb) {
850 				if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
851 					/* skb now belongs to timestamp buffer
852 					 * and will be removed later
853 					 */
854 					tx_skb->skb = NULL;
855 				}
856 				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
857 					    macb_tx_ring_wrap(bp, tail),
858 					    skb->data);
859 				bp->dev->stats.tx_packets++;
860 				queue->stats.tx_packets++;
861 				bp->dev->stats.tx_bytes += skb->len;
862 				queue->stats.tx_bytes += skb->len;
863 			}
864 
865 			/* Now we can safely release resources */
866 			macb_tx_unmap(bp, tx_skb);
867 
868 			/* skb is set only for the last buffer of the frame.
869 			 * WARNING: at this point skb has been freed by
870 			 * macb_tx_unmap().
871 			 */
872 			if (skb)
873 				break;
874 		}
875 	}
876 
877 	queue->tx_tail = tail;
878 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
879 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
880 		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
881 		netif_wake_subqueue(bp->dev, queue_index);
882 }
883 
884 static void gem_rx_refill(struct macb_queue *queue)
885 {
886 	unsigned int		entry;
887 	struct sk_buff		*skb;
888 	dma_addr_t		paddr;
889 	struct macb *bp = queue->bp;
890 	struct macb_dma_desc *desc;
891 
892 	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
893 			bp->rx_ring_size) > 0) {
894 		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
895 
896 		/* Make hw descriptor updates visible to CPU */
897 		rmb();
898 
899 		queue->rx_prepared_head++;
900 		desc = macb_rx_desc(queue, entry);
901 
902 		if (!queue->rx_skbuff[entry]) {
903 			/* allocate sk_buff for this free entry in ring */
904 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
905 			if (unlikely(!skb)) {
906 				netdev_err(bp->dev,
907 					   "Unable to allocate sk_buff\n");
908 				break;
909 			}
910 
911 			/* now fill corresponding descriptor entry */
912 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
913 					       bp->rx_buffer_size,
914 					       DMA_FROM_DEVICE);
915 			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
916 				dev_kfree_skb(skb);
917 				break;
918 			}
919 
920 			queue->rx_skbuff[entry] = skb;
921 
922 			if (entry == bp->rx_ring_size - 1)
923 				paddr |= MACB_BIT(RX_WRAP);
924 			macb_set_addr(bp, desc, paddr);
925 			desc->ctrl = 0;
926 
927 			/* properly align Ethernet header */
928 			skb_reserve(skb, NET_IP_ALIGN);
929 		} else {
930 			desc->addr &= ~MACB_BIT(RX_USED);
931 			desc->ctrl = 0;
932 		}
933 	}
934 
935 	/* Make descriptor updates visible to hardware */
936 	wmb();
937 
938 	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
939 			queue, queue->rx_prepared_head, queue->rx_tail);
940 }
941 
942 /* Mark DMA descriptors from begin up to and not including end as unused */
943 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
944 				  unsigned int end)
945 {
946 	unsigned int frag;
947 
948 	for (frag = begin; frag != end; frag++) {
949 		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
950 
951 		desc->addr &= ~MACB_BIT(RX_USED);
952 	}
953 
954 	/* Make descriptor updates visible to hardware */
955 	wmb();
956 
957 	/* When this happens, the hardware stats registers for
958 	 * whatever caused this is updated, so we don't have to record
959 	 * anything.
960 	 */
961 }
962 
963 static int gem_rx(struct macb_queue *queue, int budget)
964 {
965 	struct macb *bp = queue->bp;
966 	unsigned int		len;
967 	unsigned int		entry;
968 	struct sk_buff		*skb;
969 	struct macb_dma_desc	*desc;
970 	int			count = 0;
971 
972 	while (count < budget) {
973 		u32 ctrl;
974 		dma_addr_t addr;
975 		bool rxused;
976 
977 		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
978 		desc = macb_rx_desc(queue, entry);
979 
980 		/* Make hw descriptor updates visible to CPU */
981 		rmb();
982 
983 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
984 		addr = macb_get_addr(bp, desc);
985 		ctrl = desc->ctrl;
986 
987 		if (!rxused)
988 			break;
989 
990 		queue->rx_tail++;
991 		count++;
992 
993 		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
994 			netdev_err(bp->dev,
995 				   "not whole frame pointed by descriptor\n");
996 			bp->dev->stats.rx_dropped++;
997 			queue->stats.rx_dropped++;
998 			break;
999 		}
1000 		skb = queue->rx_skbuff[entry];
1001 		if (unlikely(!skb)) {
1002 			netdev_err(bp->dev,
1003 				   "inconsistent Rx descriptor chain\n");
1004 			bp->dev->stats.rx_dropped++;
1005 			queue->stats.rx_dropped++;
1006 			break;
1007 		}
1008 		/* now everything is ready for receiving packet */
1009 		queue->rx_skbuff[entry] = NULL;
1010 		len = ctrl & bp->rx_frm_len_mask;
1011 
1012 		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1013 
1014 		skb_put(skb, len);
1015 		dma_unmap_single(&bp->pdev->dev, addr,
1016 				 bp->rx_buffer_size, DMA_FROM_DEVICE);
1017 
1018 		skb->protocol = eth_type_trans(skb, bp->dev);
1019 		skb_checksum_none_assert(skb);
1020 		if (bp->dev->features & NETIF_F_RXCSUM &&
1021 		    !(bp->dev->flags & IFF_PROMISC) &&
1022 		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1023 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1024 
1025 		bp->dev->stats.rx_packets++;
1026 		queue->stats.rx_packets++;
1027 		bp->dev->stats.rx_bytes += skb->len;
1028 		queue->stats.rx_bytes += skb->len;
1029 
1030 		gem_ptp_do_rxstamp(bp, skb, desc);
1031 
1032 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1033 		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1034 			    skb->len, skb->csum);
1035 		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
1036 			       skb_mac_header(skb), 16, true);
1037 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1038 			       skb->data, 32, true);
1039 #endif
1040 
1041 		netif_receive_skb(skb);
1042 	}
1043 
1044 	gem_rx_refill(queue);
1045 
1046 	return count;
1047 }
1048 
1049 static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
1050 			 unsigned int last_frag)
1051 {
1052 	unsigned int len;
1053 	unsigned int frag;
1054 	unsigned int offset;
1055 	struct sk_buff *skb;
1056 	struct macb_dma_desc *desc;
1057 	struct macb *bp = queue->bp;
1058 
1059 	desc = macb_rx_desc(queue, last_frag);
1060 	len = desc->ctrl & bp->rx_frm_len_mask;
1061 
1062 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1063 		macb_rx_ring_wrap(bp, first_frag),
1064 		macb_rx_ring_wrap(bp, last_frag), len);
1065 
1066 	/* The ethernet header starts NET_IP_ALIGN bytes into the
1067 	 * first buffer. Since the header is 14 bytes, this makes the
1068 	 * payload word-aligned.
1069 	 *
1070 	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
1071 	 * the two padding bytes into the skb so that we avoid hitting
1072 	 * the slowpath in memcpy(), and pull them off afterwards.
1073 	 */
1074 	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1075 	if (!skb) {
1076 		bp->dev->stats.rx_dropped++;
1077 		for (frag = first_frag; ; frag++) {
1078 			desc = macb_rx_desc(queue, frag);
1079 			desc->addr &= ~MACB_BIT(RX_USED);
1080 			if (frag == last_frag)
1081 				break;
1082 		}
1083 
1084 		/* Make descriptor updates visible to hardware */
1085 		wmb();
1086 
1087 		return 1;
1088 	}
1089 
1090 	offset = 0;
1091 	len += NET_IP_ALIGN;
1092 	skb_checksum_none_assert(skb);
1093 	skb_put(skb, len);
1094 
1095 	for (frag = first_frag; ; frag++) {
1096 		unsigned int frag_len = bp->rx_buffer_size;
1097 
1098 		if (offset + frag_len > len) {
1099 			if (unlikely(frag != last_frag)) {
1100 				dev_kfree_skb_any(skb);
1101 				return -1;
1102 			}
1103 			frag_len = len - offset;
1104 		}
1105 		skb_copy_to_linear_data_offset(skb, offset,
1106 					       macb_rx_buffer(queue, frag),
1107 					       frag_len);
1108 		offset += bp->rx_buffer_size;
1109 		desc = macb_rx_desc(queue, frag);
1110 		desc->addr &= ~MACB_BIT(RX_USED);
1111 
1112 		if (frag == last_frag)
1113 			break;
1114 	}
1115 
1116 	/* Make descriptor updates visible to hardware */
1117 	wmb();
1118 
1119 	__skb_pull(skb, NET_IP_ALIGN);
1120 	skb->protocol = eth_type_trans(skb, bp->dev);
1121 
1122 	bp->dev->stats.rx_packets++;
1123 	bp->dev->stats.rx_bytes += skb->len;
1124 	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1125 		    skb->len, skb->csum);
1126 	netif_receive_skb(skb);
1127 
1128 	return 0;
1129 }
1130 
1131 static inline void macb_init_rx_ring(struct macb_queue *queue)
1132 {
1133 	struct macb *bp = queue->bp;
1134 	dma_addr_t addr;
1135 	struct macb_dma_desc *desc = NULL;
1136 	int i;
1137 
1138 	addr = queue->rx_buffers_dma;
1139 	for (i = 0; i < bp->rx_ring_size; i++) {
1140 		desc = macb_rx_desc(queue, i);
1141 		macb_set_addr(bp, desc, addr);
1142 		desc->ctrl = 0;
1143 		addr += bp->rx_buffer_size;
1144 	}
1145 	desc->addr |= MACB_BIT(RX_WRAP);
1146 	queue->rx_tail = 0;
1147 }
1148 
1149 static int macb_rx(struct macb_queue *queue, int budget)
1150 {
1151 	struct macb *bp = queue->bp;
1152 	bool reset_rx_queue = false;
1153 	int received = 0;
1154 	unsigned int tail;
1155 	int first_frag = -1;
1156 
1157 	for (tail = queue->rx_tail; budget > 0; tail++) {
1158 		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1159 		u32 ctrl;
1160 
1161 		/* Make hw descriptor updates visible to CPU */
1162 		rmb();
1163 
1164 		ctrl = desc->ctrl;
1165 
1166 		if (!(desc->addr & MACB_BIT(RX_USED)))
1167 			break;
1168 
1169 		if (ctrl & MACB_BIT(RX_SOF)) {
1170 			if (first_frag != -1)
1171 				discard_partial_frame(queue, first_frag, tail);
1172 			first_frag = tail;
1173 		}
1174 
1175 		if (ctrl & MACB_BIT(RX_EOF)) {
1176 			int dropped;
1177 
1178 			if (unlikely(first_frag == -1)) {
1179 				reset_rx_queue = true;
1180 				continue;
1181 			}
1182 
1183 			dropped = macb_rx_frame(queue, first_frag, tail);
1184 			first_frag = -1;
1185 			if (unlikely(dropped < 0)) {
1186 				reset_rx_queue = true;
1187 				continue;
1188 			}
1189 			if (!dropped) {
1190 				received++;
1191 				budget--;
1192 			}
1193 		}
1194 	}
1195 
1196 	if (unlikely(reset_rx_queue)) {
1197 		unsigned long flags;
1198 		u32 ctrl;
1199 
1200 		netdev_err(bp->dev, "RX queue corruption: reset it\n");
1201 
1202 		spin_lock_irqsave(&bp->lock, flags);
1203 
1204 		ctrl = macb_readl(bp, NCR);
1205 		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1206 
1207 		macb_init_rx_ring(queue);
1208 		queue_writel(queue, RBQP, queue->rx_ring_dma);
1209 
1210 		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1211 
1212 		spin_unlock_irqrestore(&bp->lock, flags);
1213 		return received;
1214 	}
1215 
1216 	if (first_frag != -1)
1217 		queue->rx_tail = first_frag;
1218 	else
1219 		queue->rx_tail = tail;
1220 
1221 	return received;
1222 }
1223 
1224 static int macb_poll(struct napi_struct *napi, int budget)
1225 {
1226 	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
1227 	struct macb *bp = queue->bp;
1228 	int work_done;
1229 	u32 status;
1230 
1231 	status = macb_readl(bp, RSR);
1232 	macb_writel(bp, RSR, status);
1233 
1234 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1235 		    (unsigned long)status, budget);
1236 
1237 	work_done = bp->macbgem_ops.mog_rx(queue, budget);
1238 	if (work_done < budget) {
1239 		napi_complete_done(napi, work_done);
1240 
1241 		/* Packets received while interrupts were disabled */
1242 		status = macb_readl(bp, RSR);
1243 		if (status) {
1244 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1245 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
1246 			napi_reschedule(napi);
1247 		} else {
1248 			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
1249 		}
1250 	}
1251 
1252 	/* TODO: Handle errors */
1253 
1254 	return work_done;
1255 }
1256 
1257 static void macb_hresp_error_task(unsigned long data)
1258 {
1259 	struct macb *bp = (struct macb *)data;
1260 	struct net_device *dev = bp->dev;
1261 	struct macb_queue *queue = bp->queues;
1262 	unsigned int q;
1263 	u32 ctrl;
1264 
1265 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1266 		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
1267 					 MACB_TX_INT_FLAGS |
1268 					 MACB_BIT(HRESP));
1269 	}
1270 	ctrl = macb_readl(bp, NCR);
1271 	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1272 	macb_writel(bp, NCR, ctrl);
1273 
1274 	netif_tx_stop_all_queues(dev);
1275 	netif_carrier_off(dev);
1276 
1277 	bp->macbgem_ops.mog_init_rings(bp);
1278 
1279 	/* Initialize TX and RX buffers */
1280 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1281 		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
1282 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1283 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1284 			queue_writel(queue, RBQPH,
1285 				     upper_32_bits(queue->rx_ring_dma));
1286 #endif
1287 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1288 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1289 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1290 			queue_writel(queue, TBQPH,
1291 				     upper_32_bits(queue->tx_ring_dma));
1292 #endif
1293 
1294 		/* Enable interrupts */
1295 		queue_writel(queue, IER,
1296 			     MACB_RX_INT_FLAGS |
1297 			     MACB_TX_INT_FLAGS |
1298 			     MACB_BIT(HRESP));
1299 	}
1300 
1301 	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1302 	macb_writel(bp, NCR, ctrl);
1303 
1304 	netif_carrier_on(dev);
1305 	netif_tx_start_all_queues(dev);
1306 }
1307 
1308 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1309 {
1310 	struct macb_queue *queue = dev_id;
1311 	struct macb *bp = queue->bp;
1312 	struct net_device *dev = bp->dev;
1313 	u32 status, ctrl;
1314 
1315 	status = queue_readl(queue, ISR);
1316 
1317 	if (unlikely(!status))
1318 		return IRQ_NONE;
1319 
1320 	spin_lock(&bp->lock);
1321 
1322 	while (status) {
1323 		/* close possible race with dev_close */
1324 		if (unlikely(!netif_running(dev))) {
1325 			queue_writel(queue, IDR, -1);
1326 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1327 				queue_writel(queue, ISR, -1);
1328 			break;
1329 		}
1330 
1331 		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1332 			    (unsigned int)(queue - bp->queues),
1333 			    (unsigned long)status);
1334 
1335 		if (status & MACB_RX_INT_FLAGS) {
1336 			/* There's no point taking any more interrupts
1337 			 * until we have processed the buffers. The
1338 			 * scheduling call may fail if the poll routine
1339 			 * is already scheduled, so disable interrupts
1340 			 * now.
1341 			 */
1342 			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1343 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1344 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
1345 
1346 			if (napi_schedule_prep(&queue->napi)) {
1347 				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1348 				__napi_schedule(&queue->napi);
1349 			}
1350 		}
1351 
1352 		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1353 			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1354 			schedule_work(&queue->tx_error_task);
1355 
1356 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1357 				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1358 
1359 			break;
1360 		}
1361 
1362 		if (status & MACB_BIT(TCOMP))
1363 			macb_tx_interrupt(queue);
1364 
1365 		/* Link change detection isn't possible with RMII, so we'll
1366 		 * add that if/when we get our hands on a full-blown MII PHY.
1367 		 */
1368 
1369 		/* There is a hardware issue under heavy load where DMA can
1370 		 * stop, this causes endless "used buffer descriptor read"
1371 		 * interrupts but it can be cleared by re-enabling RX. See
1372 		 * the at91 manual, section 41.3.1 or the Zynq manual
1373 		 * section 16.7.4 for details.
1374 		 */
1375 		if (status & MACB_BIT(RXUBR)) {
1376 			ctrl = macb_readl(bp, NCR);
1377 			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1378 			wmb();
1379 			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1380 
1381 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1382 				queue_writel(queue, ISR, MACB_BIT(RXUBR));
1383 		}
1384 
1385 		if (status & MACB_BIT(ISR_ROVR)) {
1386 			/* We missed at least one packet */
1387 			if (macb_is_gem(bp))
1388 				bp->hw_stats.gem.rx_overruns++;
1389 			else
1390 				bp->hw_stats.macb.rx_overruns++;
1391 
1392 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1393 				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1394 		}
1395 
1396 		if (status & MACB_BIT(HRESP)) {
1397 			tasklet_schedule(&bp->hresp_err_tasklet);
1398 			netdev_err(dev, "DMA bus error: HRESP not OK\n");
1399 
1400 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1401 				queue_writel(queue, ISR, MACB_BIT(HRESP));
1402 		}
1403 		status = queue_readl(queue, ISR);
1404 	}
1405 
1406 	spin_unlock(&bp->lock);
1407 
1408 	return IRQ_HANDLED;
1409 }
1410 
1411 #ifdef CONFIG_NET_POLL_CONTROLLER
1412 /* Polling receive - used by netconsole and other diagnostic tools
1413  * to allow network i/o with interrupts disabled.
1414  */
1415 static void macb_poll_controller(struct net_device *dev)
1416 {
1417 	struct macb *bp = netdev_priv(dev);
1418 	struct macb_queue *queue;
1419 	unsigned long flags;
1420 	unsigned int q;
1421 
1422 	local_irq_save(flags);
1423 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1424 		macb_interrupt(dev->irq, queue);
1425 	local_irq_restore(flags);
1426 }
1427 #endif
1428 
1429 static unsigned int macb_tx_map(struct macb *bp,
1430 				struct macb_queue *queue,
1431 				struct sk_buff *skb,
1432 				unsigned int hdrlen)
1433 {
1434 	dma_addr_t mapping;
1435 	unsigned int len, entry, i, tx_head = queue->tx_head;
1436 	struct macb_tx_skb *tx_skb = NULL;
1437 	struct macb_dma_desc *desc;
1438 	unsigned int offset, size, count = 0;
1439 	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1440 	unsigned int eof = 1, mss_mfs = 0;
1441 	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
1442 
1443 	/* LSO */
1444 	if (skb_shinfo(skb)->gso_size != 0) {
1445 		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1446 			/* UDP - UFO */
1447 			lso_ctrl = MACB_LSO_UFO_ENABLE;
1448 		else
1449 			/* TCP - TSO */
1450 			lso_ctrl = MACB_LSO_TSO_ENABLE;
1451 	}
1452 
1453 	/* First, map non-paged data */
1454 	len = skb_headlen(skb);
1455 
1456 	/* first buffer length */
1457 	size = hdrlen;
1458 
1459 	offset = 0;
1460 	while (len) {
1461 		entry = macb_tx_ring_wrap(bp, tx_head);
1462 		tx_skb = &queue->tx_skb[entry];
1463 
1464 		mapping = dma_map_single(&bp->pdev->dev,
1465 					 skb->data + offset,
1466 					 size, DMA_TO_DEVICE);
1467 		if (dma_mapping_error(&bp->pdev->dev, mapping))
1468 			goto dma_error;
1469 
1470 		/* Save info to properly release resources */
1471 		tx_skb->skb = NULL;
1472 		tx_skb->mapping = mapping;
1473 		tx_skb->size = size;
1474 		tx_skb->mapped_as_page = false;
1475 
1476 		len -= size;
1477 		offset += size;
1478 		count++;
1479 		tx_head++;
1480 
1481 		size = min(len, bp->max_tx_length);
1482 	}
1483 
1484 	/* Then, map paged data from fragments */
1485 	for (f = 0; f < nr_frags; f++) {
1486 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1487 
1488 		len = skb_frag_size(frag);
1489 		offset = 0;
1490 		while (len) {
1491 			size = min(len, bp->max_tx_length);
1492 			entry = macb_tx_ring_wrap(bp, tx_head);
1493 			tx_skb = &queue->tx_skb[entry];
1494 
1495 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1496 						   offset, size, DMA_TO_DEVICE);
1497 			if (dma_mapping_error(&bp->pdev->dev, mapping))
1498 				goto dma_error;
1499 
1500 			/* Save info to properly release resources */
1501 			tx_skb->skb = NULL;
1502 			tx_skb->mapping = mapping;
1503 			tx_skb->size = size;
1504 			tx_skb->mapped_as_page = true;
1505 
1506 			len -= size;
1507 			offset += size;
1508 			count++;
1509 			tx_head++;
1510 		}
1511 	}
1512 
1513 	/* Should never happen */
1514 	if (unlikely(!tx_skb)) {
1515 		netdev_err(bp->dev, "BUG! empty skb!\n");
1516 		return 0;
1517 	}
1518 
1519 	/* This is the last buffer of the frame: save socket buffer */
1520 	tx_skb->skb = skb;
1521 
1522 	/* Update TX ring: update buffer descriptors in reverse order
1523 	 * to avoid race condition
1524 	 */
1525 
1526 	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
1527 	 * to set the end of TX queue
1528 	 */
1529 	i = tx_head;
1530 	entry = macb_tx_ring_wrap(bp, i);
1531 	ctrl = MACB_BIT(TX_USED);
1532 	desc = macb_tx_desc(queue, entry);
1533 	desc->ctrl = ctrl;
1534 
1535 	if (lso_ctrl) {
1536 		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
1537 			/* include header and FCS in value given to h/w */
1538 			mss_mfs = skb_shinfo(skb)->gso_size +
1539 					skb_transport_offset(skb) +
1540 					ETH_FCS_LEN;
1541 		else /* TSO */ {
1542 			mss_mfs = skb_shinfo(skb)->gso_size;
1543 			/* TCP Sequence Number Source Select
1544 			 * can be set only for TSO
1545 			 */
1546 			seq_ctrl = 0;
1547 		}
1548 	}
1549 
1550 	do {
1551 		i--;
1552 		entry = macb_tx_ring_wrap(bp, i);
1553 		tx_skb = &queue->tx_skb[entry];
1554 		desc = macb_tx_desc(queue, entry);
1555 
1556 		ctrl = (u32)tx_skb->size;
1557 		if (eof) {
1558 			ctrl |= MACB_BIT(TX_LAST);
1559 			eof = 0;
1560 		}
1561 		if (unlikely(entry == (bp->tx_ring_size - 1)))
1562 			ctrl |= MACB_BIT(TX_WRAP);
1563 
1564 		/* First descriptor is header descriptor */
1565 		if (i == queue->tx_head) {
1566 			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
1567 			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
1568 		} else
1569 			/* Only set MSS/MFS on payload descriptors
1570 			 * (second or later descriptor)
1571 			 */
1572 			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1573 
1574 		/* Set TX buffer descriptor */
1575 		macb_set_addr(bp, desc, tx_skb->mapping);
1576 		/* desc->addr must be visible to hardware before clearing
1577 		 * 'TX_USED' bit in desc->ctrl.
1578 		 */
1579 		wmb();
1580 		desc->ctrl = ctrl;
1581 	} while (i != queue->tx_head);
1582 
1583 	queue->tx_head = tx_head;
1584 
1585 	return count;
1586 
1587 dma_error:
1588 	netdev_err(bp->dev, "TX DMA map failed\n");
1589 
1590 	for (i = queue->tx_head; i != tx_head; i++) {
1591 		tx_skb = macb_tx_skb(queue, i);
1592 
1593 		macb_tx_unmap(bp, tx_skb);
1594 	}
1595 
1596 	return 0;
1597 }
1598 
1599 static netdev_features_t macb_features_check(struct sk_buff *skb,
1600 					     struct net_device *dev,
1601 					     netdev_features_t features)
1602 {
1603 	unsigned int nr_frags, f;
1604 	unsigned int hdrlen;
1605 
1606 	/* Validate LSO compatibility */
1607 
1608 	/* there is only one buffer */
1609 	if (!skb_is_nonlinear(skb))
1610 		return features;
1611 
1612 	/* length of header */
1613 	hdrlen = skb_transport_offset(skb);
1614 	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1615 		hdrlen += tcp_hdrlen(skb);
1616 
1617 	/* For LSO:
1618 	 * When software supplies two or more payload buffers all payload buffers
1619 	 * apart from the last must be a multiple of 8 bytes in size.
1620 	 */
1621 	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
1622 		return features & ~MACB_NETIF_LSO;
1623 
1624 	nr_frags = skb_shinfo(skb)->nr_frags;
1625 	/* No need to check last fragment */
1626 	nr_frags--;
1627 	for (f = 0; f < nr_frags; f++) {
1628 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1629 
1630 		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
1631 			return features & ~MACB_NETIF_LSO;
1632 	}
1633 	return features;
1634 }
1635 
1636 static inline int macb_clear_csum(struct sk_buff *skb)
1637 {
1638 	/* no change for packets without checksum offloading */
1639 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1640 		return 0;
1641 
1642 	/* make sure we can modify the header */
1643 	if (unlikely(skb_cow_head(skb, 0)))
1644 		return -1;
1645 
1646 	/* initialize checksum field
1647 	 * This is required - at least for Zynq, which otherwise calculates
1648 	 * wrong UDP header checksums for UDP packets with UDP data len <=2
1649 	 */
1650 	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1651 	return 0;
1652 }
1653 
1654 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1655 {
1656 	u16 queue_index = skb_get_queue_mapping(skb);
1657 	struct macb *bp = netdev_priv(dev);
1658 	struct macb_queue *queue = &bp->queues[queue_index];
1659 	unsigned long flags;
1660 	unsigned int desc_cnt, nr_frags, frag_size, f;
1661 	unsigned int hdrlen;
1662 	bool is_lso, is_udp = 0;
1663 
1664 	is_lso = (skb_shinfo(skb)->gso_size != 0);
1665 
1666 	if (is_lso) {
1667 		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
1668 
1669 		/* length of headers */
1670 		if (is_udp)
1671 			/* only queue eth + ip headers separately for UDP */
1672 			hdrlen = skb_transport_offset(skb);
1673 		else
1674 			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
1675 		if (skb_headlen(skb) < hdrlen) {
1676 			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
1677 			/* if this is required, would need to copy to single buffer */
1678 			return NETDEV_TX_BUSY;
1679 		}
1680 	} else
1681 		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
1682 
1683 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1684 	netdev_vdbg(bp->dev,
1685 		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1686 		    queue_index, skb->len, skb->head, skb->data,
1687 		    skb_tail_pointer(skb), skb_end_pointer(skb));
1688 	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1689 		       skb->data, 16, true);
1690 #endif
1691 
1692 	/* Count how many TX buffer descriptors are needed to send this
1693 	 * socket buffer: skb fragments of jumbo frames may need to be
1694 	 * split into many buffer descriptors.
1695 	 */
1696 	if (is_lso && (skb_headlen(skb) > hdrlen))
1697 		/* extra header descriptor if also payload in first buffer */
1698 		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
1699 	else
1700 		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1701 	nr_frags = skb_shinfo(skb)->nr_frags;
1702 	for (f = 0; f < nr_frags; f++) {
1703 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1704 		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1705 	}
1706 
1707 	spin_lock_irqsave(&bp->lock, flags);
1708 
1709 	/* This is a hard error, log it. */
1710 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
1711 		       bp->tx_ring_size) < desc_cnt) {
1712 		netif_stop_subqueue(dev, queue_index);
1713 		spin_unlock_irqrestore(&bp->lock, flags);
1714 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1715 			   queue->tx_head, queue->tx_tail);
1716 		return NETDEV_TX_BUSY;
1717 	}
1718 
1719 	if (macb_clear_csum(skb)) {
1720 		dev_kfree_skb_any(skb);
1721 		goto unlock;
1722 	}
1723 
1724 	/* Map socket buffer for DMA transfer */
1725 	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
1726 		dev_kfree_skb_any(skb);
1727 		goto unlock;
1728 	}
1729 
1730 	/* Make newly initialized descriptor visible to hardware */
1731 	wmb();
1732 	skb_tx_timestamp(skb);
1733 
1734 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1735 
1736 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
1737 		netif_stop_subqueue(dev, queue_index);
1738 
1739 unlock:
1740 	spin_unlock_irqrestore(&bp->lock, flags);
1741 
1742 	return NETDEV_TX_OK;
1743 }
1744 
1745 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1746 {
1747 	if (!macb_is_gem(bp)) {
1748 		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1749 	} else {
1750 		bp->rx_buffer_size = size;
1751 
1752 		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1753 			netdev_dbg(bp->dev,
1754 				   "RX buffer must be multiple of %d bytes, expanding\n",
1755 				   RX_BUFFER_MULTIPLE);
1756 			bp->rx_buffer_size =
1757 				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1758 		}
1759 	}
1760 
1761 	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1762 		   bp->dev->mtu, bp->rx_buffer_size);
1763 }
1764 
1765 static void gem_free_rx_buffers(struct macb *bp)
1766 {
1767 	struct sk_buff		*skb;
1768 	struct macb_dma_desc	*desc;
1769 	struct macb_queue *queue;
1770 	dma_addr_t		addr;
1771 	unsigned int q;
1772 	int i;
1773 
1774 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1775 		if (!queue->rx_skbuff)
1776 			continue;
1777 
1778 		for (i = 0; i < bp->rx_ring_size; i++) {
1779 			skb = queue->rx_skbuff[i];
1780 
1781 			if (!skb)
1782 				continue;
1783 
1784 			desc = macb_rx_desc(queue, i);
1785 			addr = macb_get_addr(bp, desc);
1786 
1787 			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1788 					DMA_FROM_DEVICE);
1789 			dev_kfree_skb_any(skb);
1790 			skb = NULL;
1791 		}
1792 
1793 		kfree(queue->rx_skbuff);
1794 		queue->rx_skbuff = NULL;
1795 	}
1796 }
1797 
1798 static void macb_free_rx_buffers(struct macb *bp)
1799 {
1800 	struct macb_queue *queue = &bp->queues[0];
1801 
1802 	if (queue->rx_buffers) {
1803 		dma_free_coherent(&bp->pdev->dev,
1804 				  bp->rx_ring_size * bp->rx_buffer_size,
1805 				  queue->rx_buffers, queue->rx_buffers_dma);
1806 		queue->rx_buffers = NULL;
1807 	}
1808 }
1809 
1810 static void macb_free_consistent(struct macb *bp)
1811 {
1812 	struct macb_queue *queue;
1813 	unsigned int q;
1814 
1815 	queue = &bp->queues[0];
1816 	bp->macbgem_ops.mog_free_rx_buffers(bp);
1817 	if (queue->rx_ring) {
1818 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
1819 				queue->rx_ring, queue->rx_ring_dma);
1820 		queue->rx_ring = NULL;
1821 	}
1822 
1823 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1824 		kfree(queue->tx_skb);
1825 		queue->tx_skb = NULL;
1826 		if (queue->tx_ring) {
1827 			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
1828 					  queue->tx_ring, queue->tx_ring_dma);
1829 			queue->tx_ring = NULL;
1830 		}
1831 	}
1832 }
1833 
1834 static int gem_alloc_rx_buffers(struct macb *bp)
1835 {
1836 	struct macb_queue *queue;
1837 	unsigned int q;
1838 	int size;
1839 
1840 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1841 		size = bp->rx_ring_size * sizeof(struct sk_buff *);
1842 		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
1843 		if (!queue->rx_skbuff)
1844 			return -ENOMEM;
1845 		else
1846 			netdev_dbg(bp->dev,
1847 				   "Allocated %d RX struct sk_buff entries at %p\n",
1848 				   bp->rx_ring_size, queue->rx_skbuff);
1849 	}
1850 	return 0;
1851 }
1852 
1853 static int macb_alloc_rx_buffers(struct macb *bp)
1854 {
1855 	struct macb_queue *queue = &bp->queues[0];
1856 	int size;
1857 
1858 	size = bp->rx_ring_size * bp->rx_buffer_size;
1859 	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1860 					    &queue->rx_buffers_dma, GFP_KERNEL);
1861 	if (!queue->rx_buffers)
1862 		return -ENOMEM;
1863 
1864 	netdev_dbg(bp->dev,
1865 		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1866 		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
1867 	return 0;
1868 }
1869 
1870 static int macb_alloc_consistent(struct macb *bp)
1871 {
1872 	struct macb_queue *queue;
1873 	unsigned int q;
1874 	int size;
1875 
1876 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1877 		size = TX_RING_BYTES(bp);
1878 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1879 						    &queue->tx_ring_dma,
1880 						    GFP_KERNEL);
1881 		if (!queue->tx_ring)
1882 			goto out_err;
1883 		netdev_dbg(bp->dev,
1884 			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1885 			   q, size, (unsigned long)queue->tx_ring_dma,
1886 			   queue->tx_ring);
1887 
1888 		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
1889 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
1890 		if (!queue->tx_skb)
1891 			goto out_err;
1892 
1893 		size = RX_RING_BYTES(bp);
1894 		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1895 						 &queue->rx_ring_dma, GFP_KERNEL);
1896 		if (!queue->rx_ring)
1897 			goto out_err;
1898 		netdev_dbg(bp->dev,
1899 			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1900 			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
1901 	}
1902 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1903 		goto out_err;
1904 
1905 	return 0;
1906 
1907 out_err:
1908 	macb_free_consistent(bp);
1909 	return -ENOMEM;
1910 }
1911 
1912 static void gem_init_rings(struct macb *bp)
1913 {
1914 	struct macb_queue *queue;
1915 	struct macb_dma_desc *desc = NULL;
1916 	unsigned int q;
1917 	int i;
1918 
1919 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1920 		for (i = 0; i < bp->tx_ring_size; i++) {
1921 			desc = macb_tx_desc(queue, i);
1922 			macb_set_addr(bp, desc, 0);
1923 			desc->ctrl = MACB_BIT(TX_USED);
1924 		}
1925 		desc->ctrl |= MACB_BIT(TX_WRAP);
1926 		queue->tx_head = 0;
1927 		queue->tx_tail = 0;
1928 
1929 		queue->rx_tail = 0;
1930 		queue->rx_prepared_head = 0;
1931 
1932 		gem_rx_refill(queue);
1933 	}
1934 
1935 }
1936 
1937 static void macb_init_rings(struct macb *bp)
1938 {
1939 	int i;
1940 	struct macb_dma_desc *desc = NULL;
1941 
1942 	macb_init_rx_ring(&bp->queues[0]);
1943 
1944 	for (i = 0; i < bp->tx_ring_size; i++) {
1945 		desc = macb_tx_desc(&bp->queues[0], i);
1946 		macb_set_addr(bp, desc, 0);
1947 		desc->ctrl = MACB_BIT(TX_USED);
1948 	}
1949 	bp->queues[0].tx_head = 0;
1950 	bp->queues[0].tx_tail = 0;
1951 	desc->ctrl |= MACB_BIT(TX_WRAP);
1952 }
1953 
1954 static void macb_reset_hw(struct macb *bp)
1955 {
1956 	struct macb_queue *queue;
1957 	unsigned int q;
1958 
1959 	/* Disable RX and TX (XXX: Should we halt the transmission
1960 	 * more gracefully?)
1961 	 */
1962 	macb_writel(bp, NCR, 0);
1963 
1964 	/* Clear the stats registers (XXX: Update stats first?) */
1965 	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1966 
1967 	/* Clear all status flags */
1968 	macb_writel(bp, TSR, -1);
1969 	macb_writel(bp, RSR, -1);
1970 
1971 	/* Disable all interrupts */
1972 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1973 		queue_writel(queue, IDR, -1);
1974 		queue_readl(queue, ISR);
1975 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1976 			queue_writel(queue, ISR, -1);
1977 	}
1978 }
1979 
1980 static u32 gem_mdc_clk_div(struct macb *bp)
1981 {
1982 	u32 config;
1983 	unsigned long pclk_hz = clk_get_rate(bp->pclk);
1984 
1985 	if (pclk_hz <= 20000000)
1986 		config = GEM_BF(CLK, GEM_CLK_DIV8);
1987 	else if (pclk_hz <= 40000000)
1988 		config = GEM_BF(CLK, GEM_CLK_DIV16);
1989 	else if (pclk_hz <= 80000000)
1990 		config = GEM_BF(CLK, GEM_CLK_DIV32);
1991 	else if (pclk_hz <= 120000000)
1992 		config = GEM_BF(CLK, GEM_CLK_DIV48);
1993 	else if (pclk_hz <= 160000000)
1994 		config = GEM_BF(CLK, GEM_CLK_DIV64);
1995 	else
1996 		config = GEM_BF(CLK, GEM_CLK_DIV96);
1997 
1998 	return config;
1999 }
2000 
2001 static u32 macb_mdc_clk_div(struct macb *bp)
2002 {
2003 	u32 config;
2004 	unsigned long pclk_hz;
2005 
2006 	if (macb_is_gem(bp))
2007 		return gem_mdc_clk_div(bp);
2008 
2009 	pclk_hz = clk_get_rate(bp->pclk);
2010 	if (pclk_hz <= 20000000)
2011 		config = MACB_BF(CLK, MACB_CLK_DIV8);
2012 	else if (pclk_hz <= 40000000)
2013 		config = MACB_BF(CLK, MACB_CLK_DIV16);
2014 	else if (pclk_hz <= 80000000)
2015 		config = MACB_BF(CLK, MACB_CLK_DIV32);
2016 	else
2017 		config = MACB_BF(CLK, MACB_CLK_DIV64);
2018 
2019 	return config;
2020 }
2021 
2022 /* Get the DMA bus width field of the network configuration register that we
2023  * should program.  We find the width from decoding the design configuration
2024  * register to find the maximum supported data bus width.
2025  */
2026 static u32 macb_dbw(struct macb *bp)
2027 {
2028 	if (!macb_is_gem(bp))
2029 		return 0;
2030 
2031 	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2032 	case 4:
2033 		return GEM_BF(DBW, GEM_DBW128);
2034 	case 2:
2035 		return GEM_BF(DBW, GEM_DBW64);
2036 	case 1:
2037 	default:
2038 		return GEM_BF(DBW, GEM_DBW32);
2039 	}
2040 }
2041 
2042 /* Configure the receive DMA engine
2043  * - use the correct receive buffer size
2044  * - set best burst length for DMA operations
2045  *   (if not supported by FIFO, it will fallback to default)
2046  * - set both rx/tx packet buffers to full memory size
2047  * These are configurable parameters for GEM.
2048  */
2049 static void macb_configure_dma(struct macb *bp)
2050 {
2051 	struct macb_queue *queue;
2052 	u32 buffer_size;
2053 	unsigned int q;
2054 	u32 dmacfg;
2055 
2056 	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2057 	if (macb_is_gem(bp)) {
2058 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2059 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2060 			if (q)
2061 				queue_writel(queue, RBQS, buffer_size);
2062 			else
2063 				dmacfg |= GEM_BF(RXBS, buffer_size);
2064 		}
2065 		if (bp->dma_burst_length)
2066 			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2067 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2068 		dmacfg &= ~GEM_BIT(ENDIA_PKT);
2069 
2070 		if (bp->native_io)
2071 			dmacfg &= ~GEM_BIT(ENDIA_DESC);
2072 		else
2073 			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
2074 
2075 		if (bp->dev->features & NETIF_F_HW_CSUM)
2076 			dmacfg |= GEM_BIT(TXCOEN);
2077 		else
2078 			dmacfg &= ~GEM_BIT(TXCOEN);
2079 
2080 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2081 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2082 			dmacfg |= GEM_BIT(ADDR64);
2083 #endif
2084 #ifdef CONFIG_MACB_USE_HWSTAMP
2085 		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2086 			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2087 #endif
2088 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2089 			   dmacfg);
2090 		gem_writel(bp, DMACFG, dmacfg);
2091 	}
2092 }
2093 
2094 static void macb_init_hw(struct macb *bp)
2095 {
2096 	struct macb_queue *queue;
2097 	unsigned int q;
2098 
2099 	u32 config;
2100 
2101 	macb_reset_hw(bp);
2102 	macb_set_hwaddr(bp);
2103 
2104 	config = macb_mdc_clk_div(bp);
2105 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2106 		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2107 	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
2108 	config |= MACB_BIT(PAE);		/* PAuse Enable */
2109 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
2110 	if (bp->caps & MACB_CAPS_JUMBO)
2111 		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
2112 	else
2113 		config |= MACB_BIT(BIG);	/* Receive oversized frames */
2114 	if (bp->dev->flags & IFF_PROMISC)
2115 		config |= MACB_BIT(CAF);	/* Copy All Frames */
2116 	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2117 		config |= GEM_BIT(RXCOEN);
2118 	if (!(bp->dev->flags & IFF_BROADCAST))
2119 		config |= MACB_BIT(NBC);	/* No BroadCast */
2120 	config |= macb_dbw(bp);
2121 	macb_writel(bp, NCFGR, config);
2122 	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2123 		gem_writel(bp, JML, bp->jumbo_max_len);
2124 	bp->speed = SPEED_10;
2125 	bp->duplex = DUPLEX_HALF;
2126 	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2127 	if (bp->caps & MACB_CAPS_JUMBO)
2128 		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2129 
2130 	macb_configure_dma(bp);
2131 
2132 	/* Initialize TX and RX buffers */
2133 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2134 		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
2135 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2136 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2137 			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
2138 #endif
2139 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
2140 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2141 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2142 			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
2143 #endif
2144 
2145 		/* Enable interrupts */
2146 		queue_writel(queue, IER,
2147 			     MACB_RX_INT_FLAGS |
2148 			     MACB_TX_INT_FLAGS |
2149 			     MACB_BIT(HRESP));
2150 	}
2151 
2152 	/* Enable TX and RX */
2153 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
2154 }
2155 
2156 /* The hash address register is 64 bits long and takes up two
2157  * locations in the memory map.  The least significant bits are stored
2158  * in EMAC_HSL and the most significant bits in EMAC_HSH.
2159  *
2160  * The unicast hash enable and the multicast hash enable bits in the
2161  * network configuration register enable the reception of hash matched
2162  * frames. The destination address is reduced to a 6 bit index into
2163  * the 64 bit hash register using the following hash function.  The
2164  * hash function is an exclusive or of every sixth bit of the
2165  * destination address.
2166  *
2167  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
2168  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
2169  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
2170  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
2171  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
2172  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
2173  *
2174  * da[0] represents the least significant bit of the first byte
2175  * received, that is, the multicast/unicast indicator, and da[47]
2176  * represents the most significant bit of the last byte received.  If
2177  * the hash index, hi[n], points to a bit that is set in the hash
2178  * register then the frame will be matched according to whether the
2179  * frame is multicast or unicast.  A multicast match will be signalled
2180  * if the multicast hash enable bit is set, da[0] is 1 and the hash
2181  * index points to a bit set in the hash register.  A unicast match
2182  * will be signalled if the unicast hash enable bit is set, da[0] is 0
2183  * and the hash index points to a bit set in the hash register.  To
2184  * receive all multicast frames, the hash register should be set with
2185  * all ones and the multicast hash enable bit should be set in the
2186  * network configuration register.
2187  */
2188 
2189 static inline int hash_bit_value(int bitnr, __u8 *addr)
2190 {
2191 	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2192 		return 1;
2193 	return 0;
2194 }
2195 
2196 /* Return the hash index value for the specified address. */
2197 static int hash_get_index(__u8 *addr)
2198 {
2199 	int i, j, bitval;
2200 	int hash_index = 0;
2201 
2202 	for (j = 0; j < 6; j++) {
2203 		for (i = 0, bitval = 0; i < 8; i++)
2204 			bitval ^= hash_bit_value(i * 6 + j, addr);
2205 
2206 		hash_index |= (bitval << j);
2207 	}
2208 
2209 	return hash_index;
2210 }
2211 
2212 /* Add multicast addresses to the internal multicast-hash table. */
2213 static void macb_sethashtable(struct net_device *dev)
2214 {
2215 	struct netdev_hw_addr *ha;
2216 	unsigned long mc_filter[2];
2217 	unsigned int bitnr;
2218 	struct macb *bp = netdev_priv(dev);
2219 
2220 	mc_filter[0] = 0;
2221 	mc_filter[1] = 0;
2222 
2223 	netdev_for_each_mc_addr(ha, dev) {
2224 		bitnr = hash_get_index(ha->addr);
2225 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2226 	}
2227 
2228 	macb_or_gem_writel(bp, HRB, mc_filter[0]);
2229 	macb_or_gem_writel(bp, HRT, mc_filter[1]);
2230 }
2231 
2232 /* Enable/Disable promiscuous and multicast modes. */
2233 static void macb_set_rx_mode(struct net_device *dev)
2234 {
2235 	unsigned long cfg;
2236 	struct macb *bp = netdev_priv(dev);
2237 
2238 	cfg = macb_readl(bp, NCFGR);
2239 
2240 	if (dev->flags & IFF_PROMISC) {
2241 		/* Enable promiscuous mode */
2242 		cfg |= MACB_BIT(CAF);
2243 
2244 		/* Disable RX checksum offload */
2245 		if (macb_is_gem(bp))
2246 			cfg &= ~GEM_BIT(RXCOEN);
2247 	} else {
2248 		/* Disable promiscuous mode */
2249 		cfg &= ~MACB_BIT(CAF);
2250 
2251 		/* Enable RX checksum offload only if requested */
2252 		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2253 			cfg |= GEM_BIT(RXCOEN);
2254 	}
2255 
2256 	if (dev->flags & IFF_ALLMULTI) {
2257 		/* Enable all multicast mode */
2258 		macb_or_gem_writel(bp, HRB, -1);
2259 		macb_or_gem_writel(bp, HRT, -1);
2260 		cfg |= MACB_BIT(NCFGR_MTI);
2261 	} else if (!netdev_mc_empty(dev)) {
2262 		/* Enable specific multicasts */
2263 		macb_sethashtable(dev);
2264 		cfg |= MACB_BIT(NCFGR_MTI);
2265 	} else if (dev->flags & (~IFF_ALLMULTI)) {
2266 		/* Disable all multicast mode */
2267 		macb_or_gem_writel(bp, HRB, 0);
2268 		macb_or_gem_writel(bp, HRT, 0);
2269 		cfg &= ~MACB_BIT(NCFGR_MTI);
2270 	}
2271 
2272 	macb_writel(bp, NCFGR, cfg);
2273 }
2274 
2275 static int macb_open(struct net_device *dev)
2276 {
2277 	struct macb *bp = netdev_priv(dev);
2278 	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2279 	struct macb_queue *queue;
2280 	unsigned int q;
2281 	int err;
2282 
2283 	netdev_dbg(bp->dev, "open\n");
2284 
2285 	/* carrier starts down */
2286 	netif_carrier_off(dev);
2287 
2288 	/* if the phy is not yet register, retry later*/
2289 	if (!dev->phydev)
2290 		return -EAGAIN;
2291 
2292 	/* RX buffers initialization */
2293 	macb_init_rx_buffer_size(bp, bufsz);
2294 
2295 	err = macb_alloc_consistent(bp);
2296 	if (err) {
2297 		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2298 			   err);
2299 		return err;
2300 	}
2301 
2302 	bp->macbgem_ops.mog_init_rings(bp);
2303 	macb_init_hw(bp);
2304 
2305 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2306 		napi_enable(&queue->napi);
2307 
2308 	/* schedule a link state check */
2309 	phy_start(dev->phydev);
2310 
2311 	netif_tx_start_all_queues(dev);
2312 
2313 	if (bp->ptp_info)
2314 		bp->ptp_info->ptp_init(dev);
2315 
2316 	return 0;
2317 }
2318 
2319 static int macb_close(struct net_device *dev)
2320 {
2321 	struct macb *bp = netdev_priv(dev);
2322 	struct macb_queue *queue;
2323 	unsigned long flags;
2324 	unsigned int q;
2325 
2326 	netif_tx_stop_all_queues(dev);
2327 
2328 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2329 		napi_disable(&queue->napi);
2330 
2331 	if (dev->phydev)
2332 		phy_stop(dev->phydev);
2333 
2334 	spin_lock_irqsave(&bp->lock, flags);
2335 	macb_reset_hw(bp);
2336 	netif_carrier_off(dev);
2337 	spin_unlock_irqrestore(&bp->lock, flags);
2338 
2339 	macb_free_consistent(bp);
2340 
2341 	if (bp->ptp_info)
2342 		bp->ptp_info->ptp_remove(dev);
2343 
2344 	return 0;
2345 }
2346 
2347 static int macb_change_mtu(struct net_device *dev, int new_mtu)
2348 {
2349 	if (netif_running(dev))
2350 		return -EBUSY;
2351 
2352 	dev->mtu = new_mtu;
2353 
2354 	return 0;
2355 }
2356 
2357 static void gem_update_stats(struct macb *bp)
2358 {
2359 	struct macb_queue *queue;
2360 	unsigned int i, q, idx;
2361 	unsigned long *stat;
2362 
2363 	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2364 
2365 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2366 		u32 offset = gem_statistics[i].offset;
2367 		u64 val = bp->macb_reg_readl(bp, offset);
2368 
2369 		bp->ethtool_stats[i] += val;
2370 		*p += val;
2371 
2372 		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2373 			/* Add GEM_OCTTXH, GEM_OCTRXH */
2374 			val = bp->macb_reg_readl(bp, offset + 4);
2375 			bp->ethtool_stats[i] += ((u64)val) << 32;
2376 			*(++p) += val;
2377 		}
2378 	}
2379 
2380 	idx = GEM_STATS_LEN;
2381 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2382 		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
2383 			bp->ethtool_stats[idx++] = *stat;
2384 }
2385 
2386 static struct net_device_stats *gem_get_stats(struct macb *bp)
2387 {
2388 	struct gem_stats *hwstat = &bp->hw_stats.gem;
2389 	struct net_device_stats *nstat = &bp->dev->stats;
2390 
2391 	gem_update_stats(bp);
2392 
2393 	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2394 			    hwstat->rx_alignment_errors +
2395 			    hwstat->rx_resource_errors +
2396 			    hwstat->rx_overruns +
2397 			    hwstat->rx_oversize_frames +
2398 			    hwstat->rx_jabbers +
2399 			    hwstat->rx_undersized_frames +
2400 			    hwstat->rx_length_field_frame_errors);
2401 	nstat->tx_errors = (hwstat->tx_late_collisions +
2402 			    hwstat->tx_excessive_collisions +
2403 			    hwstat->tx_underrun +
2404 			    hwstat->tx_carrier_sense_errors);
2405 	nstat->multicast = hwstat->rx_multicast_frames;
2406 	nstat->collisions = (hwstat->tx_single_collision_frames +
2407 			     hwstat->tx_multiple_collision_frames +
2408 			     hwstat->tx_excessive_collisions);
2409 	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2410 				   hwstat->rx_jabbers +
2411 				   hwstat->rx_undersized_frames +
2412 				   hwstat->rx_length_field_frame_errors);
2413 	nstat->rx_over_errors = hwstat->rx_resource_errors;
2414 	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2415 	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2416 	nstat->rx_fifo_errors = hwstat->rx_overruns;
2417 	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2418 	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2419 	nstat->tx_fifo_errors = hwstat->tx_underrun;
2420 
2421 	return nstat;
2422 }
2423 
2424 static void gem_get_ethtool_stats(struct net_device *dev,
2425 				  struct ethtool_stats *stats, u64 *data)
2426 {
2427 	struct macb *bp;
2428 
2429 	bp = netdev_priv(dev);
2430 	gem_update_stats(bp);
2431 	memcpy(data, &bp->ethtool_stats, sizeof(u64)
2432 			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
2433 }
2434 
2435 static int gem_get_sset_count(struct net_device *dev, int sset)
2436 {
2437 	struct macb *bp = netdev_priv(dev);
2438 
2439 	switch (sset) {
2440 	case ETH_SS_STATS:
2441 		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
2442 	default:
2443 		return -EOPNOTSUPP;
2444 	}
2445 }
2446 
2447 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2448 {
2449 	char stat_string[ETH_GSTRING_LEN];
2450 	struct macb *bp = netdev_priv(dev);
2451 	struct macb_queue *queue;
2452 	unsigned int i;
2453 	unsigned int q;
2454 
2455 	switch (sset) {
2456 	case ETH_SS_STATS:
2457 		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2458 			memcpy(p, gem_statistics[i].stat_string,
2459 			       ETH_GSTRING_LEN);
2460 
2461 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2462 			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
2463 				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
2464 						q, queue_statistics[i].stat_string);
2465 				memcpy(p, stat_string, ETH_GSTRING_LEN);
2466 			}
2467 		}
2468 		break;
2469 	}
2470 }
2471 
2472 static struct net_device_stats *macb_get_stats(struct net_device *dev)
2473 {
2474 	struct macb *bp = netdev_priv(dev);
2475 	struct net_device_stats *nstat = &bp->dev->stats;
2476 	struct macb_stats *hwstat = &bp->hw_stats.macb;
2477 
2478 	if (macb_is_gem(bp))
2479 		return gem_get_stats(bp);
2480 
2481 	/* read stats from hardware */
2482 	macb_update_stats(bp);
2483 
2484 	/* Convert HW stats into netdevice stats */
2485 	nstat->rx_errors = (hwstat->rx_fcs_errors +
2486 			    hwstat->rx_align_errors +
2487 			    hwstat->rx_resource_errors +
2488 			    hwstat->rx_overruns +
2489 			    hwstat->rx_oversize_pkts +
2490 			    hwstat->rx_jabbers +
2491 			    hwstat->rx_undersize_pkts +
2492 			    hwstat->rx_length_mismatch);
2493 	nstat->tx_errors = (hwstat->tx_late_cols +
2494 			    hwstat->tx_excessive_cols +
2495 			    hwstat->tx_underruns +
2496 			    hwstat->tx_carrier_errors +
2497 			    hwstat->sqe_test_errors);
2498 	nstat->collisions = (hwstat->tx_single_cols +
2499 			     hwstat->tx_multiple_cols +
2500 			     hwstat->tx_excessive_cols);
2501 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2502 				   hwstat->rx_jabbers +
2503 				   hwstat->rx_undersize_pkts +
2504 				   hwstat->rx_length_mismatch);
2505 	nstat->rx_over_errors = hwstat->rx_resource_errors +
2506 				   hwstat->rx_overruns;
2507 	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2508 	nstat->rx_frame_errors = hwstat->rx_align_errors;
2509 	nstat->rx_fifo_errors = hwstat->rx_overruns;
2510 	/* XXX: What does "missed" mean? */
2511 	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2512 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2513 	nstat->tx_fifo_errors = hwstat->tx_underruns;
2514 	/* Don't know about heartbeat or window errors... */
2515 
2516 	return nstat;
2517 }
2518 
2519 static int macb_get_regs_len(struct net_device *netdev)
2520 {
2521 	return MACB_GREGS_NBR * sizeof(u32);
2522 }
2523 
2524 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2525 			  void *p)
2526 {
2527 	struct macb *bp = netdev_priv(dev);
2528 	unsigned int tail, head;
2529 	u32 *regs_buff = p;
2530 
2531 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2532 			| MACB_GREGS_VERSION;
2533 
2534 	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2535 	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
2536 
2537 	regs_buff[0]  = macb_readl(bp, NCR);
2538 	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
2539 	regs_buff[2]  = macb_readl(bp, NSR);
2540 	regs_buff[3]  = macb_readl(bp, TSR);
2541 	regs_buff[4]  = macb_readl(bp, RBQP);
2542 	regs_buff[5]  = macb_readl(bp, TBQP);
2543 	regs_buff[6]  = macb_readl(bp, RSR);
2544 	regs_buff[7]  = macb_readl(bp, IMR);
2545 
2546 	regs_buff[8]  = tail;
2547 	regs_buff[9]  = head;
2548 	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2549 	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2550 
2551 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2552 		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2553 	if (macb_is_gem(bp))
2554 		regs_buff[13] = gem_readl(bp, DMACFG);
2555 }
2556 
2557 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2558 {
2559 	struct macb *bp = netdev_priv(netdev);
2560 
2561 	wol->supported = 0;
2562 	wol->wolopts = 0;
2563 
2564 	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2565 		wol->supported = WAKE_MAGIC;
2566 
2567 		if (bp->wol & MACB_WOL_ENABLED)
2568 			wol->wolopts |= WAKE_MAGIC;
2569 	}
2570 }
2571 
2572 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2573 {
2574 	struct macb *bp = netdev_priv(netdev);
2575 
2576 	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2577 	    (wol->wolopts & ~WAKE_MAGIC))
2578 		return -EOPNOTSUPP;
2579 
2580 	if (wol->wolopts & WAKE_MAGIC)
2581 		bp->wol |= MACB_WOL_ENABLED;
2582 	else
2583 		bp->wol &= ~MACB_WOL_ENABLED;
2584 
2585 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2586 
2587 	return 0;
2588 }
2589 
2590 static void macb_get_ringparam(struct net_device *netdev,
2591 			       struct ethtool_ringparam *ring)
2592 {
2593 	struct macb *bp = netdev_priv(netdev);
2594 
2595 	ring->rx_max_pending = MAX_RX_RING_SIZE;
2596 	ring->tx_max_pending = MAX_TX_RING_SIZE;
2597 
2598 	ring->rx_pending = bp->rx_ring_size;
2599 	ring->tx_pending = bp->tx_ring_size;
2600 }
2601 
2602 static int macb_set_ringparam(struct net_device *netdev,
2603 			      struct ethtool_ringparam *ring)
2604 {
2605 	struct macb *bp = netdev_priv(netdev);
2606 	u32 new_rx_size, new_tx_size;
2607 	unsigned int reset = 0;
2608 
2609 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2610 		return -EINVAL;
2611 
2612 	new_rx_size = clamp_t(u32, ring->rx_pending,
2613 			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2614 	new_rx_size = roundup_pow_of_two(new_rx_size);
2615 
2616 	new_tx_size = clamp_t(u32, ring->tx_pending,
2617 			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2618 	new_tx_size = roundup_pow_of_two(new_tx_size);
2619 
2620 	if ((new_tx_size == bp->tx_ring_size) &&
2621 	    (new_rx_size == bp->rx_ring_size)) {
2622 		/* nothing to do */
2623 		return 0;
2624 	}
2625 
2626 	if (netif_running(bp->dev)) {
2627 		reset = 1;
2628 		macb_close(bp->dev);
2629 	}
2630 
2631 	bp->rx_ring_size = new_rx_size;
2632 	bp->tx_ring_size = new_tx_size;
2633 
2634 	if (reset)
2635 		macb_open(bp->dev);
2636 
2637 	return 0;
2638 }
2639 
2640 #ifdef CONFIG_MACB_USE_HWSTAMP
2641 static unsigned int gem_get_tsu_rate(struct macb *bp)
2642 {
2643 	struct clk *tsu_clk;
2644 	unsigned int tsu_rate;
2645 
2646 	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2647 	if (!IS_ERR(tsu_clk))
2648 		tsu_rate = clk_get_rate(tsu_clk);
2649 	/* try pclk instead */
2650 	else if (!IS_ERR(bp->pclk)) {
2651 		tsu_clk = bp->pclk;
2652 		tsu_rate = clk_get_rate(tsu_clk);
2653 	} else
2654 		return -ENOTSUPP;
2655 	return tsu_rate;
2656 }
2657 
2658 static s32 gem_get_ptp_max_adj(void)
2659 {
2660 	return 64000000;
2661 }
2662 
2663 static int gem_get_ts_info(struct net_device *dev,
2664 			   struct ethtool_ts_info *info)
2665 {
2666 	struct macb *bp = netdev_priv(dev);
2667 
2668 	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2669 		ethtool_op_get_ts_info(dev, info);
2670 		return 0;
2671 	}
2672 
2673 	info->so_timestamping =
2674 		SOF_TIMESTAMPING_TX_SOFTWARE |
2675 		SOF_TIMESTAMPING_RX_SOFTWARE |
2676 		SOF_TIMESTAMPING_SOFTWARE |
2677 		SOF_TIMESTAMPING_TX_HARDWARE |
2678 		SOF_TIMESTAMPING_RX_HARDWARE |
2679 		SOF_TIMESTAMPING_RAW_HARDWARE;
2680 	info->tx_types =
2681 		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2682 		(1 << HWTSTAMP_TX_OFF) |
2683 		(1 << HWTSTAMP_TX_ON);
2684 	info->rx_filters =
2685 		(1 << HWTSTAMP_FILTER_NONE) |
2686 		(1 << HWTSTAMP_FILTER_ALL);
2687 
2688 	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2689 
2690 	return 0;
2691 }
2692 
2693 static struct macb_ptp_info gem_ptp_info = {
2694 	.ptp_init	 = gem_ptp_init,
2695 	.ptp_remove	 = gem_ptp_remove,
2696 	.get_ptp_max_adj = gem_get_ptp_max_adj,
2697 	.get_tsu_rate	 = gem_get_tsu_rate,
2698 	.get_ts_info	 = gem_get_ts_info,
2699 	.get_hwtst	 = gem_get_hwtst,
2700 	.set_hwtst	 = gem_set_hwtst,
2701 };
2702 #endif
2703 
2704 static int macb_get_ts_info(struct net_device *netdev,
2705 			    struct ethtool_ts_info *info)
2706 {
2707 	struct macb *bp = netdev_priv(netdev);
2708 
2709 	if (bp->ptp_info)
2710 		return bp->ptp_info->get_ts_info(netdev, info);
2711 
2712 	return ethtool_op_get_ts_info(netdev, info);
2713 }
2714 
2715 static void gem_enable_flow_filters(struct macb *bp, bool enable)
2716 {
2717 	struct ethtool_rx_fs_item *item;
2718 	u32 t2_scr;
2719 	int num_t2_scr;
2720 
2721 	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
2722 
2723 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2724 		struct ethtool_rx_flow_spec *fs = &item->fs;
2725 		struct ethtool_tcpip4_spec *tp4sp_m;
2726 
2727 		if (fs->location >= num_t2_scr)
2728 			continue;
2729 
2730 		t2_scr = gem_readl_n(bp, SCRT2, fs->location);
2731 
2732 		/* enable/disable screener regs for the flow entry */
2733 		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
2734 
2735 		/* only enable fields with no masking */
2736 		tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2737 
2738 		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
2739 			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
2740 		else
2741 			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
2742 
2743 		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
2744 			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
2745 		else
2746 			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
2747 
2748 		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
2749 			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
2750 		else
2751 			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
2752 
2753 		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
2754 	}
2755 }
2756 
2757 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
2758 {
2759 	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
2760 	uint16_t index = fs->location;
2761 	u32 w0, w1, t2_scr;
2762 	bool cmp_a = false;
2763 	bool cmp_b = false;
2764 	bool cmp_c = false;
2765 
2766 	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
2767 	tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2768 
2769 	/* ignore field if any masking set */
2770 	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
2771 		/* 1st compare reg - IP source address */
2772 		w0 = 0;
2773 		w1 = 0;
2774 		w0 = tp4sp_v->ip4src;
2775 		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2776 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2777 		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
2778 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
2779 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
2780 		cmp_a = true;
2781 	}
2782 
2783 	/* ignore field if any masking set */
2784 	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
2785 		/* 2nd compare reg - IP destination address */
2786 		w0 = 0;
2787 		w1 = 0;
2788 		w0 = tp4sp_v->ip4dst;
2789 		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2790 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2791 		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
2792 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
2793 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
2794 		cmp_b = true;
2795 	}
2796 
2797 	/* ignore both port fields if masking set in both */
2798 	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
2799 		/* 3rd compare reg - source port, destination port */
2800 		w0 = 0;
2801 		w1 = 0;
2802 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
2803 		if (tp4sp_m->psrc == tp4sp_m->pdst) {
2804 			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
2805 			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2806 			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2807 			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2808 		} else {
2809 			/* only one port definition */
2810 			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
2811 			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
2812 			if (tp4sp_m->psrc == 0xFFFF) { /* src port */
2813 				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
2814 				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2815 			} else { /* dst port */
2816 				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2817 				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
2818 			}
2819 		}
2820 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
2821 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
2822 		cmp_c = true;
2823 	}
2824 
2825 	t2_scr = 0;
2826 	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
2827 	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
2828 	if (cmp_a)
2829 		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
2830 	if (cmp_b)
2831 		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
2832 	if (cmp_c)
2833 		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
2834 	gem_writel_n(bp, SCRT2, index, t2_scr);
2835 }
2836 
2837 static int gem_add_flow_filter(struct net_device *netdev,
2838 		struct ethtool_rxnfc *cmd)
2839 {
2840 	struct macb *bp = netdev_priv(netdev);
2841 	struct ethtool_rx_flow_spec *fs = &cmd->fs;
2842 	struct ethtool_rx_fs_item *item, *newfs;
2843 	unsigned long flags;
2844 	int ret = -EINVAL;
2845 	bool added = false;
2846 
2847 	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
2848 	if (newfs == NULL)
2849 		return -ENOMEM;
2850 	memcpy(&newfs->fs, fs, sizeof(newfs->fs));
2851 
2852 	netdev_dbg(netdev,
2853 			"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
2854 			fs->flow_type, (int)fs->ring_cookie, fs->location,
2855 			htonl(fs->h_u.tcp_ip4_spec.ip4src),
2856 			htonl(fs->h_u.tcp_ip4_spec.ip4dst),
2857 			htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
2858 
2859 	spin_lock_irqsave(&bp->rx_fs_lock, flags);
2860 
2861 	/* find correct place to add in list */
2862 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2863 		if (item->fs.location > newfs->fs.location) {
2864 			list_add_tail(&newfs->list, &item->list);
2865 			added = true;
2866 			break;
2867 		} else if (item->fs.location == fs->location) {
2868 			netdev_err(netdev, "Rule not added: location %d not free!\n",
2869 					fs->location);
2870 			ret = -EBUSY;
2871 			goto err;
2872 		}
2873 	}
2874 	if (!added)
2875 		list_add_tail(&newfs->list, &bp->rx_fs_list.list);
2876 
2877 	gem_prog_cmp_regs(bp, fs);
2878 	bp->rx_fs_list.count++;
2879 	/* enable filtering if NTUPLE on */
2880 	if (netdev->features & NETIF_F_NTUPLE)
2881 		gem_enable_flow_filters(bp, 1);
2882 
2883 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2884 	return 0;
2885 
2886 err:
2887 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2888 	kfree(newfs);
2889 	return ret;
2890 }
2891 
2892 static int gem_del_flow_filter(struct net_device *netdev,
2893 		struct ethtool_rxnfc *cmd)
2894 {
2895 	struct macb *bp = netdev_priv(netdev);
2896 	struct ethtool_rx_fs_item *item;
2897 	struct ethtool_rx_flow_spec *fs;
2898 	unsigned long flags;
2899 
2900 	spin_lock_irqsave(&bp->rx_fs_lock, flags);
2901 
2902 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2903 		if (item->fs.location == cmd->fs.location) {
2904 			/* disable screener regs for the flow entry */
2905 			fs = &(item->fs);
2906 			netdev_dbg(netdev,
2907 					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
2908 					fs->flow_type, (int)fs->ring_cookie, fs->location,
2909 					htonl(fs->h_u.tcp_ip4_spec.ip4src),
2910 					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
2911 					htons(fs->h_u.tcp_ip4_spec.psrc),
2912 					htons(fs->h_u.tcp_ip4_spec.pdst));
2913 
2914 			gem_writel_n(bp, SCRT2, fs->location, 0);
2915 
2916 			list_del(&item->list);
2917 			bp->rx_fs_list.count--;
2918 			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2919 			kfree(item);
2920 			return 0;
2921 		}
2922 	}
2923 
2924 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2925 	return -EINVAL;
2926 }
2927 
2928 static int gem_get_flow_entry(struct net_device *netdev,
2929 		struct ethtool_rxnfc *cmd)
2930 {
2931 	struct macb *bp = netdev_priv(netdev);
2932 	struct ethtool_rx_fs_item *item;
2933 
2934 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2935 		if (item->fs.location == cmd->fs.location) {
2936 			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
2937 			return 0;
2938 		}
2939 	}
2940 	return -EINVAL;
2941 }
2942 
2943 static int gem_get_all_flow_entries(struct net_device *netdev,
2944 		struct ethtool_rxnfc *cmd, u32 *rule_locs)
2945 {
2946 	struct macb *bp = netdev_priv(netdev);
2947 	struct ethtool_rx_fs_item *item;
2948 	uint32_t cnt = 0;
2949 
2950 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2951 		if (cnt == cmd->rule_cnt)
2952 			return -EMSGSIZE;
2953 		rule_locs[cnt] = item->fs.location;
2954 		cnt++;
2955 	}
2956 	cmd->data = bp->max_tuples;
2957 	cmd->rule_cnt = cnt;
2958 
2959 	return 0;
2960 }
2961 
2962 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
2963 		u32 *rule_locs)
2964 {
2965 	struct macb *bp = netdev_priv(netdev);
2966 	int ret = 0;
2967 
2968 	switch (cmd->cmd) {
2969 	case ETHTOOL_GRXRINGS:
2970 		cmd->data = bp->num_queues;
2971 		break;
2972 	case ETHTOOL_GRXCLSRLCNT:
2973 		cmd->rule_cnt = bp->rx_fs_list.count;
2974 		break;
2975 	case ETHTOOL_GRXCLSRULE:
2976 		ret = gem_get_flow_entry(netdev, cmd);
2977 		break;
2978 	case ETHTOOL_GRXCLSRLALL:
2979 		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
2980 		break;
2981 	default:
2982 		netdev_err(netdev,
2983 			  "Command parameter %d is not supported\n", cmd->cmd);
2984 		ret = -EOPNOTSUPP;
2985 	}
2986 
2987 	return ret;
2988 }
2989 
2990 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
2991 {
2992 	struct macb *bp = netdev_priv(netdev);
2993 	int ret;
2994 
2995 	switch (cmd->cmd) {
2996 	case ETHTOOL_SRXCLSRLINS:
2997 		if ((cmd->fs.location >= bp->max_tuples)
2998 				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
2999 			ret = -EINVAL;
3000 			break;
3001 		}
3002 		ret = gem_add_flow_filter(netdev, cmd);
3003 		break;
3004 	case ETHTOOL_SRXCLSRLDEL:
3005 		ret = gem_del_flow_filter(netdev, cmd);
3006 		break;
3007 	default:
3008 		netdev_err(netdev,
3009 			  "Command parameter %d is not supported\n", cmd->cmd);
3010 		ret = -EOPNOTSUPP;
3011 	}
3012 
3013 	return ret;
3014 }
3015 
3016 static const struct ethtool_ops macb_ethtool_ops = {
3017 	.get_regs_len		= macb_get_regs_len,
3018 	.get_regs		= macb_get_regs,
3019 	.get_link		= ethtool_op_get_link,
3020 	.get_ts_info		= ethtool_op_get_ts_info,
3021 	.get_wol		= macb_get_wol,
3022 	.set_wol		= macb_set_wol,
3023 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
3024 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
3025 	.get_ringparam		= macb_get_ringparam,
3026 	.set_ringparam		= macb_set_ringparam,
3027 };
3028 
3029 static const struct ethtool_ops gem_ethtool_ops = {
3030 	.get_regs_len		= macb_get_regs_len,
3031 	.get_regs		= macb_get_regs,
3032 	.get_link		= ethtool_op_get_link,
3033 	.get_ts_info		= macb_get_ts_info,
3034 	.get_ethtool_stats	= gem_get_ethtool_stats,
3035 	.get_strings		= gem_get_ethtool_strings,
3036 	.get_sset_count		= gem_get_sset_count,
3037 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
3038 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
3039 	.get_ringparam		= macb_get_ringparam,
3040 	.set_ringparam		= macb_set_ringparam,
3041 	.get_rxnfc			= gem_get_rxnfc,
3042 	.set_rxnfc			= gem_set_rxnfc,
3043 };
3044 
3045 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3046 {
3047 	struct phy_device *phydev = dev->phydev;
3048 	struct macb *bp = netdev_priv(dev);
3049 
3050 	if (!netif_running(dev))
3051 		return -EINVAL;
3052 
3053 	if (!phydev)
3054 		return -ENODEV;
3055 
3056 	if (!bp->ptp_info)
3057 		return phy_mii_ioctl(phydev, rq, cmd);
3058 
3059 	switch (cmd) {
3060 	case SIOCSHWTSTAMP:
3061 		return bp->ptp_info->set_hwtst(dev, rq, cmd);
3062 	case SIOCGHWTSTAMP:
3063 		return bp->ptp_info->get_hwtst(dev, rq);
3064 	default:
3065 		return phy_mii_ioctl(phydev, rq, cmd);
3066 	}
3067 }
3068 
3069 static int macb_set_features(struct net_device *netdev,
3070 			     netdev_features_t features)
3071 {
3072 	struct macb *bp = netdev_priv(netdev);
3073 	netdev_features_t changed = features ^ netdev->features;
3074 
3075 	/* TX checksum offload */
3076 	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
3077 		u32 dmacfg;
3078 
3079 		dmacfg = gem_readl(bp, DMACFG);
3080 		if (features & NETIF_F_HW_CSUM)
3081 			dmacfg |= GEM_BIT(TXCOEN);
3082 		else
3083 			dmacfg &= ~GEM_BIT(TXCOEN);
3084 		gem_writel(bp, DMACFG, dmacfg);
3085 	}
3086 
3087 	/* RX checksum offload */
3088 	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
3089 		u32 netcfg;
3090 
3091 		netcfg = gem_readl(bp, NCFGR);
3092 		if (features & NETIF_F_RXCSUM &&
3093 		    !(netdev->flags & IFF_PROMISC))
3094 			netcfg |= GEM_BIT(RXCOEN);
3095 		else
3096 			netcfg &= ~GEM_BIT(RXCOEN);
3097 		gem_writel(bp, NCFGR, netcfg);
3098 	}
3099 
3100 	/* RX Flow Filters */
3101 	if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
3102 		bool turn_on = features & NETIF_F_NTUPLE;
3103 
3104 		gem_enable_flow_filters(bp, turn_on);
3105 	}
3106 	return 0;
3107 }
3108 
3109 static const struct net_device_ops macb_netdev_ops = {
3110 	.ndo_open		= macb_open,
3111 	.ndo_stop		= macb_close,
3112 	.ndo_start_xmit		= macb_start_xmit,
3113 	.ndo_set_rx_mode	= macb_set_rx_mode,
3114 	.ndo_get_stats		= macb_get_stats,
3115 	.ndo_do_ioctl		= macb_ioctl,
3116 	.ndo_validate_addr	= eth_validate_addr,
3117 	.ndo_change_mtu		= macb_change_mtu,
3118 	.ndo_set_mac_address	= eth_mac_addr,
3119 #ifdef CONFIG_NET_POLL_CONTROLLER
3120 	.ndo_poll_controller	= macb_poll_controller,
3121 #endif
3122 	.ndo_set_features	= macb_set_features,
3123 	.ndo_features_check	= macb_features_check,
3124 };
3125 
3126 /* Configure peripheral capabilities according to device tree
3127  * and integration options used
3128  */
3129 static void macb_configure_caps(struct macb *bp,
3130 				const struct macb_config *dt_conf)
3131 {
3132 	u32 dcfg;
3133 
3134 	if (dt_conf)
3135 		bp->caps = dt_conf->caps;
3136 
3137 	if (hw_is_gem(bp->regs, bp->native_io)) {
3138 		bp->caps |= MACB_CAPS_MACB_IS_GEM;
3139 
3140 		dcfg = gem_readl(bp, DCFG1);
3141 		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3142 			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3143 		dcfg = gem_readl(bp, DCFG2);
3144 		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3145 			bp->caps |= MACB_CAPS_FIFO_MODE;
3146 #ifdef CONFIG_MACB_USE_HWSTAMP
3147 		if (gem_has_ptp(bp)) {
3148 			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3149 				pr_err("GEM doesn't support hardware ptp.\n");
3150 			else {
3151 				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
3152 				bp->ptp_info = &gem_ptp_info;
3153 			}
3154 		}
3155 #endif
3156 	}
3157 
3158 	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
3159 }
3160 
3161 static void macb_probe_queues(void __iomem *mem,
3162 			      bool native_io,
3163 			      unsigned int *queue_mask,
3164 			      unsigned int *num_queues)
3165 {
3166 	unsigned int hw_q;
3167 
3168 	*queue_mask = 0x1;
3169 	*num_queues = 1;
3170 
3171 	/* is it macb or gem ?
3172 	 *
3173 	 * We need to read directly from the hardware here because
3174 	 * we are early in the probe process and don't have the
3175 	 * MACB_CAPS_MACB_IS_GEM flag positioned
3176 	 */
3177 	if (!hw_is_gem(mem, native_io))
3178 		return;
3179 
3180 	/* bit 0 is never set but queue 0 always exists */
3181 	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
3182 
3183 	*queue_mask |= 0x1;
3184 
3185 	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3186 		if (*queue_mask & (1 << hw_q))
3187 			(*num_queues)++;
3188 }
3189 
3190 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3191 			 struct clk **hclk, struct clk **tx_clk,
3192 			 struct clk **rx_clk)
3193 {
3194 	struct macb_platform_data *pdata;
3195 	int err;
3196 
3197 	pdata = dev_get_platdata(&pdev->dev);
3198 	if (pdata) {
3199 		*pclk = pdata->pclk;
3200 		*hclk = pdata->hclk;
3201 	} else {
3202 		*pclk = devm_clk_get(&pdev->dev, "pclk");
3203 		*hclk = devm_clk_get(&pdev->dev, "hclk");
3204 	}
3205 
3206 	if (IS_ERR(*pclk)) {
3207 		err = PTR_ERR(*pclk);
3208 		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
3209 		return err;
3210 	}
3211 
3212 	if (IS_ERR(*hclk)) {
3213 		err = PTR_ERR(*hclk);
3214 		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
3215 		return err;
3216 	}
3217 
3218 	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
3219 	if (IS_ERR(*tx_clk))
3220 		*tx_clk = NULL;
3221 
3222 	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
3223 	if (IS_ERR(*rx_clk))
3224 		*rx_clk = NULL;
3225 
3226 	err = clk_prepare_enable(*pclk);
3227 	if (err) {
3228 		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
3229 		return err;
3230 	}
3231 
3232 	err = clk_prepare_enable(*hclk);
3233 	if (err) {
3234 		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
3235 		goto err_disable_pclk;
3236 	}
3237 
3238 	err = clk_prepare_enable(*tx_clk);
3239 	if (err) {
3240 		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
3241 		goto err_disable_hclk;
3242 	}
3243 
3244 	err = clk_prepare_enable(*rx_clk);
3245 	if (err) {
3246 		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
3247 		goto err_disable_txclk;
3248 	}
3249 
3250 	return 0;
3251 
3252 err_disable_txclk:
3253 	clk_disable_unprepare(*tx_clk);
3254 
3255 err_disable_hclk:
3256 	clk_disable_unprepare(*hclk);
3257 
3258 err_disable_pclk:
3259 	clk_disable_unprepare(*pclk);
3260 
3261 	return err;
3262 }
3263 
3264 static int macb_init(struct platform_device *pdev)
3265 {
3266 	struct net_device *dev = platform_get_drvdata(pdev);
3267 	unsigned int hw_q, q;
3268 	struct macb *bp = netdev_priv(dev);
3269 	struct macb_queue *queue;
3270 	int err;
3271 	u32 val, reg;
3272 
3273 	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3274 	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3275 
3276 	/* set the queue register mapping once for all: queue0 has a special
3277 	 * register mapping but we don't want to test the queue index then
3278 	 * compute the corresponding register offset at run time.
3279 	 */
3280 	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
3281 		if (!(bp->queue_mask & (1 << hw_q)))
3282 			continue;
3283 
3284 		queue = &bp->queues[q];
3285 		queue->bp = bp;
3286 		netif_napi_add(dev, &queue->napi, macb_poll, 64);
3287 		if (hw_q) {
3288 			queue->ISR  = GEM_ISR(hw_q - 1);
3289 			queue->IER  = GEM_IER(hw_q - 1);
3290 			queue->IDR  = GEM_IDR(hw_q - 1);
3291 			queue->IMR  = GEM_IMR(hw_q - 1);
3292 			queue->TBQP = GEM_TBQP(hw_q - 1);
3293 			queue->RBQP = GEM_RBQP(hw_q - 1);
3294 			queue->RBQS = GEM_RBQS(hw_q - 1);
3295 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3296 			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3297 				queue->TBQPH = GEM_TBQPH(hw_q - 1);
3298 				queue->RBQPH = GEM_RBQPH(hw_q - 1);
3299 			}
3300 #endif
3301 		} else {
3302 			/* queue0 uses legacy registers */
3303 			queue->ISR  = MACB_ISR;
3304 			queue->IER  = MACB_IER;
3305 			queue->IDR  = MACB_IDR;
3306 			queue->IMR  = MACB_IMR;
3307 			queue->TBQP = MACB_TBQP;
3308 			queue->RBQP = MACB_RBQP;
3309 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3310 			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3311 				queue->TBQPH = MACB_TBQPH;
3312 				queue->RBQPH = MACB_RBQPH;
3313 			}
3314 #endif
3315 		}
3316 
3317 		/* get irq: here we use the linux queue index, not the hardware
3318 		 * queue index. the queue irq definitions in the device tree
3319 		 * must remove the optional gaps that could exist in the
3320 		 * hardware queue mask.
3321 		 */
3322 		queue->irq = platform_get_irq(pdev, q);
3323 		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
3324 				       IRQF_SHARED, dev->name, queue);
3325 		if (err) {
3326 			dev_err(&pdev->dev,
3327 				"Unable to request IRQ %d (error %d)\n",
3328 				queue->irq, err);
3329 			return err;
3330 		}
3331 
3332 		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
3333 		q++;
3334 	}
3335 
3336 	dev->netdev_ops = &macb_netdev_ops;
3337 
3338 	/* setup appropriated routines according to adapter type */
3339 	if (macb_is_gem(bp)) {
3340 		bp->max_tx_length = GEM_MAX_TX_LEN;
3341 		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3342 		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3343 		bp->macbgem_ops.mog_init_rings = gem_init_rings;
3344 		bp->macbgem_ops.mog_rx = gem_rx;
3345 		dev->ethtool_ops = &gem_ethtool_ops;
3346 	} else {
3347 		bp->max_tx_length = MACB_MAX_TX_LEN;
3348 		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3349 		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3350 		bp->macbgem_ops.mog_init_rings = macb_init_rings;
3351 		bp->macbgem_ops.mog_rx = macb_rx;
3352 		dev->ethtool_ops = &macb_ethtool_ops;
3353 	}
3354 
3355 	/* Set features */
3356 	dev->hw_features = NETIF_F_SG;
3357 
3358 	/* Check LSO capability */
3359 	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3360 		dev->hw_features |= MACB_NETIF_LSO;
3361 
3362 	/* Checksum offload is only available on gem with packet buffer */
3363 	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
3364 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3365 	if (bp->caps & MACB_CAPS_SG_DISABLED)
3366 		dev->hw_features &= ~NETIF_F_SG;
3367 	dev->features = dev->hw_features;
3368 
3369 	/* Check RX Flow Filters support.
3370 	 * Max Rx flows set by availability of screeners & compare regs:
3371 	 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
3372 	 */
3373 	reg = gem_readl(bp, DCFG8);
3374 	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3375 			GEM_BFEXT(T2SCR, reg));
3376 	if (bp->max_tuples > 0) {
3377 		/* also needs one ethtype match to check IPv4 */
3378 		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
3379 			/* program this reg now */
3380 			reg = 0;
3381 			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3382 			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
3383 			/* Filtering is supported in hw but don't enable it in kernel now */
3384 			dev->hw_features |= NETIF_F_NTUPLE;
3385 			/* init Rx flow definitions */
3386 			INIT_LIST_HEAD(&bp->rx_fs_list.list);
3387 			bp->rx_fs_list.count = 0;
3388 			spin_lock_init(&bp->rx_fs_lock);
3389 		} else
3390 			bp->max_tuples = 0;
3391 	}
3392 
3393 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3394 		val = 0;
3395 		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
3396 			val = GEM_BIT(RGMII);
3397 		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
3398 			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3399 			val = MACB_BIT(RMII);
3400 		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3401 			val = MACB_BIT(MII);
3402 
3403 		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3404 			val |= MACB_BIT(CLKEN);
3405 
3406 		macb_or_gem_writel(bp, USRIO, val);
3407 	}
3408 
3409 	/* Set MII management clock divider */
3410 	val = macb_mdc_clk_div(bp);
3411 	val |= macb_dbw(bp);
3412 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3413 		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
3414 	macb_writel(bp, NCFGR, val);
3415 
3416 	return 0;
3417 }
3418 
3419 #if defined(CONFIG_OF)
3420 /* 1518 rounded up */
3421 #define AT91ETHER_MAX_RBUFF_SZ	0x600
3422 /* max number of receive buffers */
3423 #define AT91ETHER_MAX_RX_DESCR	9
3424 
3425 /* Initialize and start the Receiver and Transmit subsystems */
3426 static int at91ether_start(struct net_device *dev)
3427 {
3428 	struct macb *lp = netdev_priv(dev);
3429 	struct macb_queue *q = &lp->queues[0];
3430 	struct macb_dma_desc *desc;
3431 	dma_addr_t addr;
3432 	u32 ctl;
3433 	int i;
3434 
3435 	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
3436 					 (AT91ETHER_MAX_RX_DESCR *
3437 					  macb_dma_desc_get_size(lp)),
3438 					 &q->rx_ring_dma, GFP_KERNEL);
3439 	if (!q->rx_ring)
3440 		return -ENOMEM;
3441 
3442 	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
3443 					    AT91ETHER_MAX_RX_DESCR *
3444 					    AT91ETHER_MAX_RBUFF_SZ,
3445 					    &q->rx_buffers_dma, GFP_KERNEL);
3446 	if (!q->rx_buffers) {
3447 		dma_free_coherent(&lp->pdev->dev,
3448 				  AT91ETHER_MAX_RX_DESCR *
3449 				  macb_dma_desc_get_size(lp),
3450 				  q->rx_ring, q->rx_ring_dma);
3451 		q->rx_ring = NULL;
3452 		return -ENOMEM;
3453 	}
3454 
3455 	addr = q->rx_buffers_dma;
3456 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
3457 		desc = macb_rx_desc(q, i);
3458 		macb_set_addr(lp, desc, addr);
3459 		desc->ctrl = 0;
3460 		addr += AT91ETHER_MAX_RBUFF_SZ;
3461 	}
3462 
3463 	/* Set the Wrap bit on the last descriptor */
3464 	desc->addr |= MACB_BIT(RX_WRAP);
3465 
3466 	/* Reset buffer index */
3467 	q->rx_tail = 0;
3468 
3469 	/* Program address of descriptor list in Rx Buffer Queue register */
3470 	macb_writel(lp, RBQP, q->rx_ring_dma);
3471 
3472 	/* Enable Receive and Transmit */
3473 	ctl = macb_readl(lp, NCR);
3474 	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3475 
3476 	return 0;
3477 }
3478 
3479 /* Open the ethernet interface */
3480 static int at91ether_open(struct net_device *dev)
3481 {
3482 	struct macb *lp = netdev_priv(dev);
3483 	u32 ctl;
3484 	int ret;
3485 
3486 	/* Clear internal statistics */
3487 	ctl = macb_readl(lp, NCR);
3488 	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3489 
3490 	macb_set_hwaddr(lp);
3491 
3492 	ret = at91ether_start(dev);
3493 	if (ret)
3494 		return ret;
3495 
3496 	/* Enable MAC interrupts */
3497 	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
3498 			     MACB_BIT(RXUBR)	|
3499 			     MACB_BIT(ISR_TUND)	|
3500 			     MACB_BIT(ISR_RLE)	|
3501 			     MACB_BIT(TCOMP)	|
3502 			     MACB_BIT(ISR_ROVR)	|
3503 			     MACB_BIT(HRESP));
3504 
3505 	/* schedule a link state check */
3506 	phy_start(dev->phydev);
3507 
3508 	netif_start_queue(dev);
3509 
3510 	return 0;
3511 }
3512 
3513 /* Close the interface */
3514 static int at91ether_close(struct net_device *dev)
3515 {
3516 	struct macb *lp = netdev_priv(dev);
3517 	struct macb_queue *q = &lp->queues[0];
3518 	u32 ctl;
3519 
3520 	/* Disable Receiver and Transmitter */
3521 	ctl = macb_readl(lp, NCR);
3522 	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3523 
3524 	/* Disable MAC interrupts */
3525 	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
3526 			     MACB_BIT(RXUBR)	|
3527 			     MACB_BIT(ISR_TUND)	|
3528 			     MACB_BIT(ISR_RLE)	|
3529 			     MACB_BIT(TCOMP)	|
3530 			     MACB_BIT(ISR_ROVR) |
3531 			     MACB_BIT(HRESP));
3532 
3533 	netif_stop_queue(dev);
3534 
3535 	dma_free_coherent(&lp->pdev->dev,
3536 			  AT91ETHER_MAX_RX_DESCR *
3537 			  macb_dma_desc_get_size(lp),
3538 			  q->rx_ring, q->rx_ring_dma);
3539 	q->rx_ring = NULL;
3540 
3541 	dma_free_coherent(&lp->pdev->dev,
3542 			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
3543 			  q->rx_buffers, q->rx_buffers_dma);
3544 	q->rx_buffers = NULL;
3545 
3546 	return 0;
3547 }
3548 
3549 /* Transmit packet */
3550 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
3551 {
3552 	struct macb *lp = netdev_priv(dev);
3553 
3554 	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3555 		netif_stop_queue(dev);
3556 
3557 		/* Store packet information (to free when Tx completed) */
3558 		lp->skb = skb;
3559 		lp->skb_length = skb->len;
3560 		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
3561 							DMA_TO_DEVICE);
3562 		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
3563 			dev_kfree_skb_any(skb);
3564 			dev->stats.tx_dropped++;
3565 			netdev_err(dev, "%s: DMA mapping error\n", __func__);
3566 			return NETDEV_TX_OK;
3567 		}
3568 
3569 		/* Set address of the data in the Transmit Address register */
3570 		macb_writel(lp, TAR, lp->skb_physaddr);
3571 		/* Set length of the packet in the Transmit Control register */
3572 		macb_writel(lp, TCR, skb->len);
3573 
3574 	} else {
3575 		netdev_err(dev, "%s called, but device is busy!\n", __func__);
3576 		return NETDEV_TX_BUSY;
3577 	}
3578 
3579 	return NETDEV_TX_OK;
3580 }
3581 
3582 /* Extract received frame from buffer descriptors and sent to upper layers.
3583  * (Called from interrupt context)
3584  */
3585 static void at91ether_rx(struct net_device *dev)
3586 {
3587 	struct macb *lp = netdev_priv(dev);
3588 	struct macb_queue *q = &lp->queues[0];
3589 	struct macb_dma_desc *desc;
3590 	unsigned char *p_recv;
3591 	struct sk_buff *skb;
3592 	unsigned int pktlen;
3593 
3594 	desc = macb_rx_desc(q, q->rx_tail);
3595 	while (desc->addr & MACB_BIT(RX_USED)) {
3596 		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
3597 		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
3598 		skb = netdev_alloc_skb(dev, pktlen + 2);
3599 		if (skb) {
3600 			skb_reserve(skb, 2);
3601 			skb_put_data(skb, p_recv, pktlen);
3602 
3603 			skb->protocol = eth_type_trans(skb, dev);
3604 			dev->stats.rx_packets++;
3605 			dev->stats.rx_bytes += pktlen;
3606 			netif_rx(skb);
3607 		} else {
3608 			dev->stats.rx_dropped++;
3609 		}
3610 
3611 		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
3612 			dev->stats.multicast++;
3613 
3614 		/* reset ownership bit */
3615 		desc->addr &= ~MACB_BIT(RX_USED);
3616 
3617 		/* wrap after last buffer */
3618 		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
3619 			q->rx_tail = 0;
3620 		else
3621 			q->rx_tail++;
3622 
3623 		desc = macb_rx_desc(q, q->rx_tail);
3624 	}
3625 }
3626 
3627 /* MAC interrupt handler */
3628 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
3629 {
3630 	struct net_device *dev = dev_id;
3631 	struct macb *lp = netdev_priv(dev);
3632 	u32 intstatus, ctl;
3633 
3634 	/* MAC Interrupt Status register indicates what interrupts are pending.
3635 	 * It is automatically cleared once read.
3636 	 */
3637 	intstatus = macb_readl(lp, ISR);
3638 
3639 	/* Receive complete */
3640 	if (intstatus & MACB_BIT(RCOMP))
3641 		at91ether_rx(dev);
3642 
3643 	/* Transmit complete */
3644 	if (intstatus & MACB_BIT(TCOMP)) {
3645 		/* The TCOM bit is set even if the transmission failed */
3646 		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
3647 			dev->stats.tx_errors++;
3648 
3649 		if (lp->skb) {
3650 			dev_kfree_skb_irq(lp->skb);
3651 			lp->skb = NULL;
3652 			dma_unmap_single(NULL, lp->skb_physaddr,
3653 					 lp->skb_length, DMA_TO_DEVICE);
3654 			dev->stats.tx_packets++;
3655 			dev->stats.tx_bytes += lp->skb_length;
3656 		}
3657 		netif_wake_queue(dev);
3658 	}
3659 
3660 	/* Work-around for EMAC Errata section 41.3.1 */
3661 	if (intstatus & MACB_BIT(RXUBR)) {
3662 		ctl = macb_readl(lp, NCR);
3663 		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
3664 		wmb();
3665 		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
3666 	}
3667 
3668 	if (intstatus & MACB_BIT(ISR_ROVR))
3669 		netdev_err(dev, "ROVR error\n");
3670 
3671 	return IRQ_HANDLED;
3672 }
3673 
3674 #ifdef CONFIG_NET_POLL_CONTROLLER
3675 static void at91ether_poll_controller(struct net_device *dev)
3676 {
3677 	unsigned long flags;
3678 
3679 	local_irq_save(flags);
3680 	at91ether_interrupt(dev->irq, dev);
3681 	local_irq_restore(flags);
3682 }
3683 #endif
3684 
3685 static const struct net_device_ops at91ether_netdev_ops = {
3686 	.ndo_open		= at91ether_open,
3687 	.ndo_stop		= at91ether_close,
3688 	.ndo_start_xmit		= at91ether_start_xmit,
3689 	.ndo_get_stats		= macb_get_stats,
3690 	.ndo_set_rx_mode	= macb_set_rx_mode,
3691 	.ndo_set_mac_address	= eth_mac_addr,
3692 	.ndo_do_ioctl		= macb_ioctl,
3693 	.ndo_validate_addr	= eth_validate_addr,
3694 #ifdef CONFIG_NET_POLL_CONTROLLER
3695 	.ndo_poll_controller	= at91ether_poll_controller,
3696 #endif
3697 };
3698 
3699 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
3700 			      struct clk **hclk, struct clk **tx_clk,
3701 			      struct clk **rx_clk)
3702 {
3703 	int err;
3704 
3705 	*hclk = NULL;
3706 	*tx_clk = NULL;
3707 	*rx_clk = NULL;
3708 
3709 	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
3710 	if (IS_ERR(*pclk))
3711 		return PTR_ERR(*pclk);
3712 
3713 	err = clk_prepare_enable(*pclk);
3714 	if (err) {
3715 		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
3716 		return err;
3717 	}
3718 
3719 	return 0;
3720 }
3721 
3722 static int at91ether_init(struct platform_device *pdev)
3723 {
3724 	struct net_device *dev = platform_get_drvdata(pdev);
3725 	struct macb *bp = netdev_priv(dev);
3726 	int err;
3727 	u32 reg;
3728 
3729 	dev->netdev_ops = &at91ether_netdev_ops;
3730 	dev->ethtool_ops = &macb_ethtool_ops;
3731 
3732 	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
3733 			       0, dev->name, dev);
3734 	if (err)
3735 		return err;
3736 
3737 	macb_writel(bp, NCR, 0);
3738 
3739 	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3740 	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3741 		reg |= MACB_BIT(RM9200_RMII);
3742 
3743 	macb_writel(bp, NCFGR, reg);
3744 
3745 	return 0;
3746 }
3747 
3748 static const struct macb_config at91sam9260_config = {
3749 	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3750 	.clk_init = macb_clk_init,
3751 	.init = macb_init,
3752 };
3753 
3754 static const struct macb_config pc302gem_config = {
3755 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3756 	.dma_burst_length = 16,
3757 	.clk_init = macb_clk_init,
3758 	.init = macb_init,
3759 };
3760 
3761 static const struct macb_config sama5d2_config = {
3762 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3763 	.dma_burst_length = 16,
3764 	.clk_init = macb_clk_init,
3765 	.init = macb_init,
3766 };
3767 
3768 static const struct macb_config sama5d3_config = {
3769 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
3770 	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
3771 	.dma_burst_length = 16,
3772 	.clk_init = macb_clk_init,
3773 	.init = macb_init,
3774 	.jumbo_max_len = 10240,
3775 };
3776 
3777 static const struct macb_config sama5d4_config = {
3778 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3779 	.dma_burst_length = 4,
3780 	.clk_init = macb_clk_init,
3781 	.init = macb_init,
3782 };
3783 
3784 static const struct macb_config emac_config = {
3785 	.clk_init = at91ether_clk_init,
3786 	.init = at91ether_init,
3787 };
3788 
3789 static const struct macb_config np4_config = {
3790 	.caps = MACB_CAPS_USRIO_DISABLED,
3791 	.clk_init = macb_clk_init,
3792 	.init = macb_init,
3793 };
3794 
3795 static const struct macb_config zynqmp_config = {
3796 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3797 			MACB_CAPS_JUMBO |
3798 			MACB_CAPS_GEM_HAS_PTP,
3799 	.dma_burst_length = 16,
3800 	.clk_init = macb_clk_init,
3801 	.init = macb_init,
3802 	.jumbo_max_len = 10240,
3803 };
3804 
3805 static const struct macb_config zynq_config = {
3806 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
3807 	.dma_burst_length = 16,
3808 	.clk_init = macb_clk_init,
3809 	.init = macb_init,
3810 };
3811 
3812 static const struct of_device_id macb_dt_ids[] = {
3813 	{ .compatible = "cdns,at32ap7000-macb" },
3814 	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
3815 	{ .compatible = "cdns,macb" },
3816 	{ .compatible = "cdns,np4-macb", .data = &np4_config },
3817 	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
3818 	{ .compatible = "cdns,gem", .data = &pc302gem_config },
3819 	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
3820 	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
3821 	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3822 	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3823 	{ .compatible = "cdns,emac", .data = &emac_config },
3824 	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
3825 	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
3826 	{ /* sentinel */ }
3827 };
3828 MODULE_DEVICE_TABLE(of, macb_dt_ids);
3829 #endif /* CONFIG_OF */
3830 
3831 static const struct macb_config default_gem_config = {
3832 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3833 			MACB_CAPS_JUMBO |
3834 			MACB_CAPS_GEM_HAS_PTP,
3835 	.dma_burst_length = 16,
3836 	.clk_init = macb_clk_init,
3837 	.init = macb_init,
3838 	.jumbo_max_len = 10240,
3839 };
3840 
3841 static int macb_probe(struct platform_device *pdev)
3842 {
3843 	const struct macb_config *macb_config = &default_gem_config;
3844 	int (*clk_init)(struct platform_device *, struct clk **,
3845 			struct clk **, struct clk **,  struct clk **)
3846 					      = macb_config->clk_init;
3847 	int (*init)(struct platform_device *) = macb_config->init;
3848 	struct device_node *np = pdev->dev.of_node;
3849 	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
3850 	unsigned int queue_mask, num_queues;
3851 	struct macb_platform_data *pdata;
3852 	bool native_io;
3853 	struct phy_device *phydev;
3854 	struct net_device *dev;
3855 	struct resource *regs;
3856 	void __iomem *mem;
3857 	const char *mac;
3858 	struct macb *bp;
3859 	int err;
3860 
3861 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3862 	mem = devm_ioremap_resource(&pdev->dev, regs);
3863 	if (IS_ERR(mem))
3864 		return PTR_ERR(mem);
3865 
3866 	if (np) {
3867 		const struct of_device_id *match;
3868 
3869 		match = of_match_node(macb_dt_ids, np);
3870 		if (match && match->data) {
3871 			macb_config = match->data;
3872 			clk_init = macb_config->clk_init;
3873 			init = macb_config->init;
3874 		}
3875 	}
3876 
3877 	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
3878 	if (err)
3879 		return err;
3880 
3881 	native_io = hw_is_native_io(mem);
3882 
3883 	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
3884 	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
3885 	if (!dev) {
3886 		err = -ENOMEM;
3887 		goto err_disable_clocks;
3888 	}
3889 
3890 	dev->base_addr = regs->start;
3891 
3892 	SET_NETDEV_DEV(dev, &pdev->dev);
3893 
3894 	bp = netdev_priv(dev);
3895 	bp->pdev = pdev;
3896 	bp->dev = dev;
3897 	bp->regs = mem;
3898 	bp->native_io = native_io;
3899 	if (native_io) {
3900 		bp->macb_reg_readl = hw_readl_native;
3901 		bp->macb_reg_writel = hw_writel_native;
3902 	} else {
3903 		bp->macb_reg_readl = hw_readl;
3904 		bp->macb_reg_writel = hw_writel;
3905 	}
3906 	bp->num_queues = num_queues;
3907 	bp->queue_mask = queue_mask;
3908 	if (macb_config)
3909 		bp->dma_burst_length = macb_config->dma_burst_length;
3910 	bp->pclk = pclk;
3911 	bp->hclk = hclk;
3912 	bp->tx_clk = tx_clk;
3913 	bp->rx_clk = rx_clk;
3914 	if (macb_config)
3915 		bp->jumbo_max_len = macb_config->jumbo_max_len;
3916 
3917 	bp->wol = 0;
3918 	if (of_get_property(np, "magic-packet", NULL))
3919 		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
3920 	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3921 
3922 	spin_lock_init(&bp->lock);
3923 
3924 	/* setup capabilities */
3925 	macb_configure_caps(bp, macb_config);
3926 
3927 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3928 	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3929 		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3930 		bp->hw_dma_cap |= HW_DMA_CAP_64B;
3931 	}
3932 #endif
3933 	platform_set_drvdata(pdev, dev);
3934 
3935 	dev->irq = platform_get_irq(pdev, 0);
3936 	if (dev->irq < 0) {
3937 		err = dev->irq;
3938 		goto err_out_free_netdev;
3939 	}
3940 
3941 	/* MTU range: 68 - 1500 or 10240 */
3942 	dev->min_mtu = GEM_MTU_MIN_SIZE;
3943 	if (bp->caps & MACB_CAPS_JUMBO)
3944 		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
3945 	else
3946 		dev->max_mtu = ETH_DATA_LEN;
3947 
3948 	mac = of_get_mac_address(np);
3949 	if (mac) {
3950 		ether_addr_copy(bp->dev->dev_addr, mac);
3951 	} else {
3952 		err = of_get_nvmem_mac_address(np, bp->dev->dev_addr);
3953 		if (err) {
3954 			if (err == -EPROBE_DEFER)
3955 				goto err_out_free_netdev;
3956 			macb_get_hwaddr(bp);
3957 		}
3958 	}
3959 
3960 	err = of_get_phy_mode(np);
3961 	if (err < 0) {
3962 		pdata = dev_get_platdata(&pdev->dev);
3963 		if (pdata && pdata->is_rmii)
3964 			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
3965 		else
3966 			bp->phy_interface = PHY_INTERFACE_MODE_MII;
3967 	} else {
3968 		bp->phy_interface = err;
3969 	}
3970 
3971 	/* IP specific init */
3972 	err = init(pdev);
3973 	if (err)
3974 		goto err_out_free_netdev;
3975 
3976 	err = macb_mii_init(bp);
3977 	if (err)
3978 		goto err_out_free_netdev;
3979 
3980 	phydev = dev->phydev;
3981 
3982 	netif_carrier_off(dev);
3983 
3984 	err = register_netdev(dev);
3985 	if (err) {
3986 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
3987 		goto err_out_unregister_mdio;
3988 	}
3989 
3990 	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
3991 		     (unsigned long)bp);
3992 
3993 	phy_attached_info(phydev);
3994 
3995 	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
3996 		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
3997 		    dev->base_addr, dev->irq, dev->dev_addr);
3998 
3999 	return 0;
4000 
4001 err_out_unregister_mdio:
4002 	phy_disconnect(dev->phydev);
4003 	mdiobus_unregister(bp->mii_bus);
4004 	of_node_put(bp->phy_node);
4005 	if (np && of_phy_is_fixed_link(np))
4006 		of_phy_deregister_fixed_link(np);
4007 	mdiobus_free(bp->mii_bus);
4008 
4009 err_out_free_netdev:
4010 	free_netdev(dev);
4011 
4012 err_disable_clocks:
4013 	clk_disable_unprepare(tx_clk);
4014 	clk_disable_unprepare(hclk);
4015 	clk_disable_unprepare(pclk);
4016 	clk_disable_unprepare(rx_clk);
4017 
4018 	return err;
4019 }
4020 
4021 static int macb_remove(struct platform_device *pdev)
4022 {
4023 	struct net_device *dev;
4024 	struct macb *bp;
4025 	struct device_node *np = pdev->dev.of_node;
4026 
4027 	dev = platform_get_drvdata(pdev);
4028 
4029 	if (dev) {
4030 		bp = netdev_priv(dev);
4031 		if (dev->phydev)
4032 			phy_disconnect(dev->phydev);
4033 		mdiobus_unregister(bp->mii_bus);
4034 		if (np && of_phy_is_fixed_link(np))
4035 			of_phy_deregister_fixed_link(np);
4036 		dev->phydev = NULL;
4037 		mdiobus_free(bp->mii_bus);
4038 
4039 		unregister_netdev(dev);
4040 		clk_disable_unprepare(bp->tx_clk);
4041 		clk_disable_unprepare(bp->hclk);
4042 		clk_disable_unprepare(bp->pclk);
4043 		clk_disable_unprepare(bp->rx_clk);
4044 		of_node_put(bp->phy_node);
4045 		free_netdev(dev);
4046 	}
4047 
4048 	return 0;
4049 }
4050 
4051 static int __maybe_unused macb_suspend(struct device *dev)
4052 {
4053 	struct platform_device *pdev = to_platform_device(dev);
4054 	struct net_device *netdev = platform_get_drvdata(pdev);
4055 	struct macb *bp = netdev_priv(netdev);
4056 
4057 	netif_carrier_off(netdev);
4058 	netif_device_detach(netdev);
4059 
4060 	if (bp->wol & MACB_WOL_ENABLED) {
4061 		macb_writel(bp, IER, MACB_BIT(WOL));
4062 		macb_writel(bp, WOL, MACB_BIT(MAG));
4063 		enable_irq_wake(bp->queues[0].irq);
4064 	} else {
4065 		clk_disable_unprepare(bp->tx_clk);
4066 		clk_disable_unprepare(bp->hclk);
4067 		clk_disable_unprepare(bp->pclk);
4068 		clk_disable_unprepare(bp->rx_clk);
4069 	}
4070 
4071 	return 0;
4072 }
4073 
4074 static int __maybe_unused macb_resume(struct device *dev)
4075 {
4076 	struct platform_device *pdev = to_platform_device(dev);
4077 	struct net_device *netdev = platform_get_drvdata(pdev);
4078 	struct macb *bp = netdev_priv(netdev);
4079 
4080 	if (bp->wol & MACB_WOL_ENABLED) {
4081 		macb_writel(bp, IDR, MACB_BIT(WOL));
4082 		macb_writel(bp, WOL, 0);
4083 		disable_irq_wake(bp->queues[0].irq);
4084 	} else {
4085 		clk_prepare_enable(bp->pclk);
4086 		clk_prepare_enable(bp->hclk);
4087 		clk_prepare_enable(bp->tx_clk);
4088 		clk_prepare_enable(bp->rx_clk);
4089 	}
4090 
4091 	netif_device_attach(netdev);
4092 
4093 	return 0;
4094 }
4095 
4096 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
4097 
4098 static struct platform_driver macb_driver = {
4099 	.probe		= macb_probe,
4100 	.remove		= macb_remove,
4101 	.driver		= {
4102 		.name		= "macb",
4103 		.of_match_table	= of_match_ptr(macb_dt_ids),
4104 		.pm	= &macb_pm_ops,
4105 	},
4106 };
4107 
4108 module_platform_driver(macb_driver);
4109 
4110 MODULE_LICENSE("GPL");
4111 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
4112 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
4113 MODULE_ALIAS("platform:macb");
4114