1 /*
2  * Driver for BCM963xx builtin Ethernet mac
3  *
4  * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/clk.h>
24 #include <linux/etherdevice.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/ethtool.h>
28 #include <linux/crc32.h>
29 #include <linux/err.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/platform_device.h>
32 #include <linux/if_vlan.h>
33 
34 #include <bcm63xx_dev_enet.h>
35 #include "bcm63xx_enet.h"
36 
37 static char bcm_enet_driver_name[] = "bcm63xx_enet";
38 static char bcm_enet_driver_version[] = "1.0";
39 
40 static int copybreak __read_mostly = 128;
41 module_param(copybreak, int, 0);
42 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
43 
44 /* io registers memory shared between all devices */
45 static void __iomem *bcm_enet_shared_base[3];
46 
47 /*
48  * io helpers to access mac registers
49  */
50 static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
51 {
52 	return bcm_readl(priv->base + off);
53 }
54 
55 static inline void enet_writel(struct bcm_enet_priv *priv,
56 			       u32 val, u32 off)
57 {
58 	bcm_writel(val, priv->base + off);
59 }
60 
61 /*
62  * io helpers to access switch registers
63  */
64 static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
65 {
66 	return bcm_readl(priv->base + off);
67 }
68 
69 static inline void enetsw_writel(struct bcm_enet_priv *priv,
70 				 u32 val, u32 off)
71 {
72 	bcm_writel(val, priv->base + off);
73 }
74 
75 static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
76 {
77 	return bcm_readw(priv->base + off);
78 }
79 
80 static inline void enetsw_writew(struct bcm_enet_priv *priv,
81 				 u16 val, u32 off)
82 {
83 	bcm_writew(val, priv->base + off);
84 }
85 
86 static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
87 {
88 	return bcm_readb(priv->base + off);
89 }
90 
91 static inline void enetsw_writeb(struct bcm_enet_priv *priv,
92 				 u8 val, u32 off)
93 {
94 	bcm_writeb(val, priv->base + off);
95 }
96 
97 
98 /* io helpers to access shared registers */
99 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
100 {
101 	return bcm_readl(bcm_enet_shared_base[0] + off);
102 }
103 
104 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
105 				       u32 val, u32 off)
106 {
107 	bcm_writel(val, bcm_enet_shared_base[0] + off);
108 }
109 
110 static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
111 {
112 	return bcm_readl(bcm_enet_shared_base[1] +
113 		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
114 }
115 
116 static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
117 				       u32 val, u32 off, int chan)
118 {
119 	bcm_writel(val, bcm_enet_shared_base[1] +
120 		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
121 }
122 
123 static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
124 {
125 	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
126 }
127 
128 static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
129 				       u32 val, u32 off, int chan)
130 {
131 	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
132 }
133 
134 /*
135  * write given data into mii register and wait for transfer to end
136  * with timeout (average measured transfer time is 25us)
137  */
138 static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
139 {
140 	int limit;
141 
142 	/* make sure mii interrupt status is cleared */
143 	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
144 
145 	enet_writel(priv, data, ENET_MIIDATA_REG);
146 	wmb();
147 
148 	/* busy wait on mii interrupt bit, with timeout */
149 	limit = 1000;
150 	do {
151 		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
152 			break;
153 		udelay(1);
154 	} while (limit-- > 0);
155 
156 	return (limit < 0) ? 1 : 0;
157 }
158 
159 /*
160  * MII internal read callback
161  */
162 static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
163 			      int regnum)
164 {
165 	u32 tmp, val;
166 
167 	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
168 	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
169 	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
170 	tmp |= ENET_MIIDATA_OP_READ_MASK;
171 
172 	if (do_mdio_op(priv, tmp))
173 		return -1;
174 
175 	val = enet_readl(priv, ENET_MIIDATA_REG);
176 	val &= 0xffff;
177 	return val;
178 }
179 
180 /*
181  * MII internal write callback
182  */
183 static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
184 			       int regnum, u16 value)
185 {
186 	u32 tmp;
187 
188 	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
189 	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
190 	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
191 	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
192 	tmp |= ENET_MIIDATA_OP_WRITE_MASK;
193 
194 	(void)do_mdio_op(priv, tmp);
195 	return 0;
196 }
197 
198 /*
199  * MII read callback from phylib
200  */
201 static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
202 				     int regnum)
203 {
204 	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
205 }
206 
207 /*
208  * MII write callback from phylib
209  */
210 static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
211 				      int regnum, u16 value)
212 {
213 	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
214 }
215 
216 /*
217  * MII read callback from mii core
218  */
219 static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
220 				  int regnum)
221 {
222 	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
223 }
224 
225 /*
226  * MII write callback from mii core
227  */
228 static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
229 				    int regnum, int value)
230 {
231 	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
232 }
233 
234 /*
235  * refill rx queue
236  */
237 static int bcm_enet_refill_rx(struct net_device *dev)
238 {
239 	struct bcm_enet_priv *priv;
240 
241 	priv = netdev_priv(dev);
242 
243 	while (priv->rx_desc_count < priv->rx_ring_size) {
244 		struct bcm_enet_desc *desc;
245 		struct sk_buff *skb;
246 		dma_addr_t p;
247 		int desc_idx;
248 		u32 len_stat;
249 
250 		desc_idx = priv->rx_dirty_desc;
251 		desc = &priv->rx_desc_cpu[desc_idx];
252 
253 		if (!priv->rx_skb[desc_idx]) {
254 			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
255 			if (!skb)
256 				break;
257 			priv->rx_skb[desc_idx] = skb;
258 			p = dma_map_single(&priv->pdev->dev, skb->data,
259 					   priv->rx_skb_size,
260 					   DMA_FROM_DEVICE);
261 			desc->address = p;
262 		}
263 
264 		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
265 		len_stat |= DMADESC_OWNER_MASK;
266 		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
267 			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
268 			priv->rx_dirty_desc = 0;
269 		} else {
270 			priv->rx_dirty_desc++;
271 		}
272 		wmb();
273 		desc->len_stat = len_stat;
274 
275 		priv->rx_desc_count++;
276 
277 		/* tell dma engine we allocated one buffer */
278 		if (priv->dma_has_sram)
279 			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
280 		else
281 			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
282 	}
283 
284 	/* If rx ring is still empty, set a timer to try allocating
285 	 * again at a later time. */
286 	if (priv->rx_desc_count == 0 && netif_running(dev)) {
287 		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
288 		priv->rx_timeout.expires = jiffies + HZ;
289 		add_timer(&priv->rx_timeout);
290 	}
291 
292 	return 0;
293 }
294 
295 /*
296  * timer callback to defer refill rx queue in case we're OOM
297  */
298 static void bcm_enet_refill_rx_timer(unsigned long data)
299 {
300 	struct net_device *dev;
301 	struct bcm_enet_priv *priv;
302 
303 	dev = (struct net_device *)data;
304 	priv = netdev_priv(dev);
305 
306 	spin_lock(&priv->rx_lock);
307 	bcm_enet_refill_rx((struct net_device *)data);
308 	spin_unlock(&priv->rx_lock);
309 }
310 
311 /*
312  * extract packet from rx queue
313  */
314 static int bcm_enet_receive_queue(struct net_device *dev, int budget)
315 {
316 	struct bcm_enet_priv *priv;
317 	struct device *kdev;
318 	int processed;
319 
320 	priv = netdev_priv(dev);
321 	kdev = &priv->pdev->dev;
322 	processed = 0;
323 
324 	/* don't scan ring further than number of refilled
325 	 * descriptor */
326 	if (budget > priv->rx_desc_count)
327 		budget = priv->rx_desc_count;
328 
329 	do {
330 		struct bcm_enet_desc *desc;
331 		struct sk_buff *skb;
332 		int desc_idx;
333 		u32 len_stat;
334 		unsigned int len;
335 
336 		desc_idx = priv->rx_curr_desc;
337 		desc = &priv->rx_desc_cpu[desc_idx];
338 
339 		/* make sure we actually read the descriptor status at
340 		 * each loop */
341 		rmb();
342 
343 		len_stat = desc->len_stat;
344 
345 		/* break if dma ownership belongs to hw */
346 		if (len_stat & DMADESC_OWNER_MASK)
347 			break;
348 
349 		processed++;
350 		priv->rx_curr_desc++;
351 		if (priv->rx_curr_desc == priv->rx_ring_size)
352 			priv->rx_curr_desc = 0;
353 		priv->rx_desc_count--;
354 
355 		/* if the packet does not have start of packet _and_
356 		 * end of packet flag set, then just recycle it */
357 		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
358 			(DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
359 			dev->stats.rx_dropped++;
360 			continue;
361 		}
362 
363 		/* recycle packet if it's marked as bad */
364 		if (!priv->enet_is_sw &&
365 		    unlikely(len_stat & DMADESC_ERR_MASK)) {
366 			dev->stats.rx_errors++;
367 
368 			if (len_stat & DMADESC_OVSIZE_MASK)
369 				dev->stats.rx_length_errors++;
370 			if (len_stat & DMADESC_CRC_MASK)
371 				dev->stats.rx_crc_errors++;
372 			if (len_stat & DMADESC_UNDER_MASK)
373 				dev->stats.rx_frame_errors++;
374 			if (len_stat & DMADESC_OV_MASK)
375 				dev->stats.rx_fifo_errors++;
376 			continue;
377 		}
378 
379 		/* valid packet */
380 		skb = priv->rx_skb[desc_idx];
381 		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
382 		/* don't include FCS */
383 		len -= 4;
384 
385 		if (len < copybreak) {
386 			struct sk_buff *nskb;
387 
388 			nskb = netdev_alloc_skb_ip_align(dev, len);
389 			if (!nskb) {
390 				/* forget packet, just rearm desc */
391 				dev->stats.rx_dropped++;
392 				continue;
393 			}
394 
395 			dma_sync_single_for_cpu(kdev, desc->address,
396 						len, DMA_FROM_DEVICE);
397 			memcpy(nskb->data, skb->data, len);
398 			dma_sync_single_for_device(kdev, desc->address,
399 						   len, DMA_FROM_DEVICE);
400 			skb = nskb;
401 		} else {
402 			dma_unmap_single(&priv->pdev->dev, desc->address,
403 					 priv->rx_skb_size, DMA_FROM_DEVICE);
404 			priv->rx_skb[desc_idx] = NULL;
405 		}
406 
407 		skb_put(skb, len);
408 		skb->protocol = eth_type_trans(skb, dev);
409 		dev->stats.rx_packets++;
410 		dev->stats.rx_bytes += len;
411 		netif_receive_skb(skb);
412 
413 	} while (--budget > 0);
414 
415 	if (processed || !priv->rx_desc_count) {
416 		bcm_enet_refill_rx(dev);
417 
418 		/* kick rx dma */
419 		enet_dmac_writel(priv, priv->dma_chan_en_mask,
420 					 ENETDMAC_CHANCFG, priv->rx_chan);
421 	}
422 
423 	return processed;
424 }
425 
426 
427 /*
428  * try to or force reclaim of transmitted buffers
429  */
430 static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
431 {
432 	struct bcm_enet_priv *priv;
433 	int released;
434 
435 	priv = netdev_priv(dev);
436 	released = 0;
437 
438 	while (priv->tx_desc_count < priv->tx_ring_size) {
439 		struct bcm_enet_desc *desc;
440 		struct sk_buff *skb;
441 
442 		/* We run in a bh and fight against start_xmit, which
443 		 * is called with bh disabled  */
444 		spin_lock(&priv->tx_lock);
445 
446 		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
447 
448 		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
449 			spin_unlock(&priv->tx_lock);
450 			break;
451 		}
452 
453 		/* ensure other field of the descriptor were not read
454 		 * before we checked ownership */
455 		rmb();
456 
457 		skb = priv->tx_skb[priv->tx_dirty_desc];
458 		priv->tx_skb[priv->tx_dirty_desc] = NULL;
459 		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
460 				 DMA_TO_DEVICE);
461 
462 		priv->tx_dirty_desc++;
463 		if (priv->tx_dirty_desc == priv->tx_ring_size)
464 			priv->tx_dirty_desc = 0;
465 		priv->tx_desc_count++;
466 
467 		spin_unlock(&priv->tx_lock);
468 
469 		if (desc->len_stat & DMADESC_UNDER_MASK)
470 			dev->stats.tx_errors++;
471 
472 		dev_kfree_skb(skb);
473 		released++;
474 	}
475 
476 	if (netif_queue_stopped(dev) && released)
477 		netif_wake_queue(dev);
478 
479 	return released;
480 }
481 
482 /*
483  * poll func, called by network core
484  */
485 static int bcm_enet_poll(struct napi_struct *napi, int budget)
486 {
487 	struct bcm_enet_priv *priv;
488 	struct net_device *dev;
489 	int tx_work_done, rx_work_done;
490 
491 	priv = container_of(napi, struct bcm_enet_priv, napi);
492 	dev = priv->net_dev;
493 
494 	/* ack interrupts */
495 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
496 			 ENETDMAC_IR, priv->rx_chan);
497 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
498 			 ENETDMAC_IR, priv->tx_chan);
499 
500 	/* reclaim sent skb */
501 	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
502 
503 	spin_lock(&priv->rx_lock);
504 	rx_work_done = bcm_enet_receive_queue(dev, budget);
505 	spin_unlock(&priv->rx_lock);
506 
507 	if (rx_work_done >= budget || tx_work_done > 0) {
508 		/* rx/tx queue is not yet empty/clean */
509 		return rx_work_done;
510 	}
511 
512 	/* no more packet in rx/tx queue, remove device from poll
513 	 * queue */
514 	napi_complete(napi);
515 
516 	/* restore rx/tx interrupt */
517 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
518 			 ENETDMAC_IRMASK, priv->rx_chan);
519 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
520 			 ENETDMAC_IRMASK, priv->tx_chan);
521 
522 	return rx_work_done;
523 }
524 
525 /*
526  * mac interrupt handler
527  */
528 static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
529 {
530 	struct net_device *dev;
531 	struct bcm_enet_priv *priv;
532 	u32 stat;
533 
534 	dev = dev_id;
535 	priv = netdev_priv(dev);
536 
537 	stat = enet_readl(priv, ENET_IR_REG);
538 	if (!(stat & ENET_IR_MIB))
539 		return IRQ_NONE;
540 
541 	/* clear & mask interrupt */
542 	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
543 	enet_writel(priv, 0, ENET_IRMASK_REG);
544 
545 	/* read mib registers in workqueue */
546 	schedule_work(&priv->mib_update_task);
547 
548 	return IRQ_HANDLED;
549 }
550 
551 /*
552  * rx/tx dma interrupt handler
553  */
554 static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
555 {
556 	struct net_device *dev;
557 	struct bcm_enet_priv *priv;
558 
559 	dev = dev_id;
560 	priv = netdev_priv(dev);
561 
562 	/* mask rx/tx interrupts */
563 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
564 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
565 
566 	napi_schedule(&priv->napi);
567 
568 	return IRQ_HANDLED;
569 }
570 
571 /*
572  * tx request callback
573  */
574 static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
575 {
576 	struct bcm_enet_priv *priv;
577 	struct bcm_enet_desc *desc;
578 	u32 len_stat;
579 	int ret;
580 
581 	priv = netdev_priv(dev);
582 
583 	/* lock against tx reclaim */
584 	spin_lock(&priv->tx_lock);
585 
586 	/* make sure  the tx hw queue  is not full,  should not happen
587 	 * since we stop queue before it's the case */
588 	if (unlikely(!priv->tx_desc_count)) {
589 		netif_stop_queue(dev);
590 		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
591 			"available?\n");
592 		ret = NETDEV_TX_BUSY;
593 		goto out_unlock;
594 	}
595 
596 	/* pad small packets sent on a switch device */
597 	if (priv->enet_is_sw && skb->len < 64) {
598 		int needed = 64 - skb->len;
599 		char *data;
600 
601 		if (unlikely(skb_tailroom(skb) < needed)) {
602 			struct sk_buff *nskb;
603 
604 			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
605 			if (!nskb) {
606 				ret = NETDEV_TX_BUSY;
607 				goto out_unlock;
608 			}
609 			dev_kfree_skb(skb);
610 			skb = nskb;
611 		}
612 		data = skb_put(skb, needed);
613 		memset(data, 0, needed);
614 	}
615 
616 	/* point to the next available desc */
617 	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
618 	priv->tx_skb[priv->tx_curr_desc] = skb;
619 
620 	/* fill descriptor */
621 	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
622 				       DMA_TO_DEVICE);
623 
624 	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
625 	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
626 		DMADESC_APPEND_CRC |
627 		DMADESC_OWNER_MASK;
628 
629 	priv->tx_curr_desc++;
630 	if (priv->tx_curr_desc == priv->tx_ring_size) {
631 		priv->tx_curr_desc = 0;
632 		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
633 	}
634 	priv->tx_desc_count--;
635 
636 	/* dma might be already polling, make sure we update desc
637 	 * fields in correct order */
638 	wmb();
639 	desc->len_stat = len_stat;
640 	wmb();
641 
642 	/* kick tx dma */
643 	enet_dmac_writel(priv, priv->dma_chan_en_mask,
644 				 ENETDMAC_CHANCFG, priv->tx_chan);
645 
646 	/* stop queue if no more desc available */
647 	if (!priv->tx_desc_count)
648 		netif_stop_queue(dev);
649 
650 	dev->stats.tx_bytes += skb->len;
651 	dev->stats.tx_packets++;
652 	ret = NETDEV_TX_OK;
653 
654 out_unlock:
655 	spin_unlock(&priv->tx_lock);
656 	return ret;
657 }
658 
659 /*
660  * Change the interface's mac address.
661  */
662 static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
663 {
664 	struct bcm_enet_priv *priv;
665 	struct sockaddr *addr = p;
666 	u32 val;
667 
668 	priv = netdev_priv(dev);
669 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
670 
671 	/* use perfect match register 0 to store my mac address */
672 	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
673 		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
674 	enet_writel(priv, val, ENET_PML_REG(0));
675 
676 	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
677 	val |= ENET_PMH_DATAVALID_MASK;
678 	enet_writel(priv, val, ENET_PMH_REG(0));
679 
680 	return 0;
681 }
682 
683 /*
684  * Change rx mode (promiscuous/allmulti) and update multicast list
685  */
686 static void bcm_enet_set_multicast_list(struct net_device *dev)
687 {
688 	struct bcm_enet_priv *priv;
689 	struct netdev_hw_addr *ha;
690 	u32 val;
691 	int i;
692 
693 	priv = netdev_priv(dev);
694 
695 	val = enet_readl(priv, ENET_RXCFG_REG);
696 
697 	if (dev->flags & IFF_PROMISC)
698 		val |= ENET_RXCFG_PROMISC_MASK;
699 	else
700 		val &= ~ENET_RXCFG_PROMISC_MASK;
701 
702 	/* only 3 perfect match registers left, first one is used for
703 	 * own mac address */
704 	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
705 		val |= ENET_RXCFG_ALLMCAST_MASK;
706 	else
707 		val &= ~ENET_RXCFG_ALLMCAST_MASK;
708 
709 	/* no need to set perfect match registers if we catch all
710 	 * multicast */
711 	if (val & ENET_RXCFG_ALLMCAST_MASK) {
712 		enet_writel(priv, val, ENET_RXCFG_REG);
713 		return;
714 	}
715 
716 	i = 0;
717 	netdev_for_each_mc_addr(ha, dev) {
718 		u8 *dmi_addr;
719 		u32 tmp;
720 
721 		if (i == 3)
722 			break;
723 		/* update perfect match registers */
724 		dmi_addr = ha->addr;
725 		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
726 			(dmi_addr[4] << 8) | dmi_addr[5];
727 		enet_writel(priv, tmp, ENET_PML_REG(i + 1));
728 
729 		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
730 		tmp |= ENET_PMH_DATAVALID_MASK;
731 		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
732 	}
733 
734 	for (; i < 3; i++) {
735 		enet_writel(priv, 0, ENET_PML_REG(i + 1));
736 		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
737 	}
738 
739 	enet_writel(priv, val, ENET_RXCFG_REG);
740 }
741 
742 /*
743  * set mac duplex parameters
744  */
745 static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
746 {
747 	u32 val;
748 
749 	val = enet_readl(priv, ENET_TXCTL_REG);
750 	if (fullduplex)
751 		val |= ENET_TXCTL_FD_MASK;
752 	else
753 		val &= ~ENET_TXCTL_FD_MASK;
754 	enet_writel(priv, val, ENET_TXCTL_REG);
755 }
756 
757 /*
758  * set mac flow control parameters
759  */
760 static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
761 {
762 	u32 val;
763 
764 	/* rx flow control (pause frame handling) */
765 	val = enet_readl(priv, ENET_RXCFG_REG);
766 	if (rx_en)
767 		val |= ENET_RXCFG_ENFLOW_MASK;
768 	else
769 		val &= ~ENET_RXCFG_ENFLOW_MASK;
770 	enet_writel(priv, val, ENET_RXCFG_REG);
771 
772 	if (!priv->dma_has_sram)
773 		return;
774 
775 	/* tx flow control (pause frame generation) */
776 	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
777 	if (tx_en)
778 		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
779 	else
780 		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
781 	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
782 }
783 
784 /*
785  * link changed callback (from phylib)
786  */
787 static void bcm_enet_adjust_phy_link(struct net_device *dev)
788 {
789 	struct bcm_enet_priv *priv;
790 	struct phy_device *phydev;
791 	int status_changed;
792 
793 	priv = netdev_priv(dev);
794 	phydev = priv->phydev;
795 	status_changed = 0;
796 
797 	if (priv->old_link != phydev->link) {
798 		status_changed = 1;
799 		priv->old_link = phydev->link;
800 	}
801 
802 	/* reflect duplex change in mac configuration */
803 	if (phydev->link && phydev->duplex != priv->old_duplex) {
804 		bcm_enet_set_duplex(priv,
805 				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
806 		status_changed = 1;
807 		priv->old_duplex = phydev->duplex;
808 	}
809 
810 	/* enable flow control if remote advertise it (trust phylib to
811 	 * check that duplex is full */
812 	if (phydev->link && phydev->pause != priv->old_pause) {
813 		int rx_pause_en, tx_pause_en;
814 
815 		if (phydev->pause) {
816 			/* pause was advertised by lpa and us */
817 			rx_pause_en = 1;
818 			tx_pause_en = 1;
819 		} else if (!priv->pause_auto) {
820 			/* pause setting overrided by user */
821 			rx_pause_en = priv->pause_rx;
822 			tx_pause_en = priv->pause_tx;
823 		} else {
824 			rx_pause_en = 0;
825 			tx_pause_en = 0;
826 		}
827 
828 		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
829 		status_changed = 1;
830 		priv->old_pause = phydev->pause;
831 	}
832 
833 	if (status_changed) {
834 		pr_info("%s: link %s", dev->name, phydev->link ?
835 			"UP" : "DOWN");
836 		if (phydev->link)
837 			pr_cont(" - %d/%s - flow control %s", phydev->speed,
838 			       DUPLEX_FULL == phydev->duplex ? "full" : "half",
839 			       phydev->pause == 1 ? "rx&tx" : "off");
840 
841 		pr_cont("\n");
842 	}
843 }
844 
845 /*
846  * link changed callback (if phylib is not used)
847  */
848 static void bcm_enet_adjust_link(struct net_device *dev)
849 {
850 	struct bcm_enet_priv *priv;
851 
852 	priv = netdev_priv(dev);
853 	bcm_enet_set_duplex(priv, priv->force_duplex_full);
854 	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
855 	netif_carrier_on(dev);
856 
857 	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
858 		dev->name,
859 		priv->force_speed_100 ? 100 : 10,
860 		priv->force_duplex_full ? "full" : "half",
861 		priv->pause_rx ? "rx" : "off",
862 		priv->pause_tx ? "tx" : "off");
863 }
864 
865 /*
866  * open callback, allocate dma rings & buffers and start rx operation
867  */
868 static int bcm_enet_open(struct net_device *dev)
869 {
870 	struct bcm_enet_priv *priv;
871 	struct sockaddr addr;
872 	struct device *kdev;
873 	struct phy_device *phydev;
874 	int i, ret;
875 	unsigned int size;
876 	char phy_id[MII_BUS_ID_SIZE + 3];
877 	void *p;
878 	u32 val;
879 
880 	priv = netdev_priv(dev);
881 	kdev = &priv->pdev->dev;
882 
883 	if (priv->has_phy) {
884 		/* connect to PHY */
885 		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
886 			 priv->mii_bus->id, priv->phy_id);
887 
888 		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
889 				     PHY_INTERFACE_MODE_MII);
890 
891 		if (IS_ERR(phydev)) {
892 			dev_err(kdev, "could not attach to PHY\n");
893 			return PTR_ERR(phydev);
894 		}
895 
896 		/* mask with MAC supported features */
897 		phydev->supported &= (SUPPORTED_10baseT_Half |
898 				      SUPPORTED_10baseT_Full |
899 				      SUPPORTED_100baseT_Half |
900 				      SUPPORTED_100baseT_Full |
901 				      SUPPORTED_Autoneg |
902 				      SUPPORTED_Pause |
903 				      SUPPORTED_MII);
904 		phydev->advertising = phydev->supported;
905 
906 		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
907 			phydev->advertising |= SUPPORTED_Pause;
908 		else
909 			phydev->advertising &= ~SUPPORTED_Pause;
910 
911 		dev_info(kdev, "attached PHY at address %d [%s]\n",
912 			 phydev->addr, phydev->drv->name);
913 
914 		priv->old_link = 0;
915 		priv->old_duplex = -1;
916 		priv->old_pause = -1;
917 		priv->phydev = phydev;
918 	}
919 
920 	/* mask all interrupts and request them */
921 	enet_writel(priv, 0, ENET_IRMASK_REG);
922 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
923 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
924 
925 	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
926 	if (ret)
927 		goto out_phy_disconnect;
928 
929 	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
930 			  dev->name, dev);
931 	if (ret)
932 		goto out_freeirq;
933 
934 	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
935 			  0, dev->name, dev);
936 	if (ret)
937 		goto out_freeirq_rx;
938 
939 	/* initialize perfect match registers */
940 	for (i = 0; i < 4; i++) {
941 		enet_writel(priv, 0, ENET_PML_REG(i));
942 		enet_writel(priv, 0, ENET_PMH_REG(i));
943 	}
944 
945 	/* write device mac address */
946 	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
947 	bcm_enet_set_mac_address(dev, &addr);
948 
949 	/* allocate rx dma ring */
950 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
951 	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
952 	if (!p) {
953 		ret = -ENOMEM;
954 		goto out_freeirq_tx;
955 	}
956 
957 	priv->rx_desc_alloc_size = size;
958 	priv->rx_desc_cpu = p;
959 
960 	/* allocate tx dma ring */
961 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
962 	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
963 	if (!p) {
964 		ret = -ENOMEM;
965 		goto out_free_rx_ring;
966 	}
967 
968 	priv->tx_desc_alloc_size = size;
969 	priv->tx_desc_cpu = p;
970 
971 	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
972 			       GFP_KERNEL);
973 	if (!priv->tx_skb) {
974 		ret = -ENOMEM;
975 		goto out_free_tx_ring;
976 	}
977 
978 	priv->tx_desc_count = priv->tx_ring_size;
979 	priv->tx_dirty_desc = 0;
980 	priv->tx_curr_desc = 0;
981 	spin_lock_init(&priv->tx_lock);
982 
983 	/* init & fill rx ring with skbs */
984 	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
985 			       GFP_KERNEL);
986 	if (!priv->rx_skb) {
987 		ret = -ENOMEM;
988 		goto out_free_tx_skb;
989 	}
990 
991 	priv->rx_desc_count = 0;
992 	priv->rx_dirty_desc = 0;
993 	priv->rx_curr_desc = 0;
994 
995 	/* initialize flow control buffer allocation */
996 	if (priv->dma_has_sram)
997 		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
998 				ENETDMA_BUFALLOC_REG(priv->rx_chan));
999 	else
1000 		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1001 				ENETDMAC_BUFALLOC, priv->rx_chan);
1002 
1003 	if (bcm_enet_refill_rx(dev)) {
1004 		dev_err(kdev, "cannot allocate rx skb queue\n");
1005 		ret = -ENOMEM;
1006 		goto out;
1007 	}
1008 
1009 	/* write rx & tx ring addresses */
1010 	if (priv->dma_has_sram) {
1011 		enet_dmas_writel(priv, priv->rx_desc_dma,
1012 				 ENETDMAS_RSTART_REG, priv->rx_chan);
1013 		enet_dmas_writel(priv, priv->tx_desc_dma,
1014 			 ENETDMAS_RSTART_REG, priv->tx_chan);
1015 	} else {
1016 		enet_dmac_writel(priv, priv->rx_desc_dma,
1017 				ENETDMAC_RSTART, priv->rx_chan);
1018 		enet_dmac_writel(priv, priv->tx_desc_dma,
1019 				ENETDMAC_RSTART, priv->tx_chan);
1020 	}
1021 
1022 	/* clear remaining state ram for rx & tx channel */
1023 	if (priv->dma_has_sram) {
1024 		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
1025 		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
1026 		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
1027 		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
1028 		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1029 		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1030 	} else {
1031 		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1032 		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1033 	}
1034 
1035 	/* set max rx/tx length */
1036 	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
1037 	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
1038 
1039 	/* set dma maximum burst len */
1040 	enet_dmac_writel(priv, priv->dma_maxburst,
1041 			 ENETDMAC_MAXBURST, priv->rx_chan);
1042 	enet_dmac_writel(priv, priv->dma_maxburst,
1043 			 ENETDMAC_MAXBURST, priv->tx_chan);
1044 
1045 	/* set correct transmit fifo watermark */
1046 	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1047 
1048 	/* set flow control low/high threshold to 1/3 / 2/3 */
1049 	if (priv->dma_has_sram) {
1050 		val = priv->rx_ring_size / 3;
1051 		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1052 		val = (priv->rx_ring_size * 2) / 3;
1053 		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1054 	} else {
1055 		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1056 		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1057 		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1058 	}
1059 
1060 	/* all set, enable mac and interrupts, start dma engine and
1061 	 * kick rx dma channel */
1062 	wmb();
1063 	val = enet_readl(priv, ENET_CTL_REG);
1064 	val |= ENET_CTL_ENABLE_MASK;
1065 	enet_writel(priv, val, ENET_CTL_REG);
1066 	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1067 	enet_dmac_writel(priv, priv->dma_chan_en_mask,
1068 			 ENETDMAC_CHANCFG, priv->rx_chan);
1069 
1070 	/* watch "mib counters about to overflow" interrupt */
1071 	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1072 	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1073 
1074 	/* watch "packet transferred" interrupt in rx and tx */
1075 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
1076 			 ENETDMAC_IR, priv->rx_chan);
1077 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
1078 			 ENETDMAC_IR, priv->tx_chan);
1079 
1080 	/* make sure we enable napi before rx interrupt  */
1081 	napi_enable(&priv->napi);
1082 
1083 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
1084 			 ENETDMAC_IRMASK, priv->rx_chan);
1085 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
1086 			 ENETDMAC_IRMASK, priv->tx_chan);
1087 
1088 	if (priv->has_phy)
1089 		phy_start(priv->phydev);
1090 	else
1091 		bcm_enet_adjust_link(dev);
1092 
1093 	netif_start_queue(dev);
1094 	return 0;
1095 
1096 out:
1097 	for (i = 0; i < priv->rx_ring_size; i++) {
1098 		struct bcm_enet_desc *desc;
1099 
1100 		if (!priv->rx_skb[i])
1101 			continue;
1102 
1103 		desc = &priv->rx_desc_cpu[i];
1104 		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1105 				 DMA_FROM_DEVICE);
1106 		kfree_skb(priv->rx_skb[i]);
1107 	}
1108 	kfree(priv->rx_skb);
1109 
1110 out_free_tx_skb:
1111 	kfree(priv->tx_skb);
1112 
1113 out_free_tx_ring:
1114 	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1115 			  priv->tx_desc_cpu, priv->tx_desc_dma);
1116 
1117 out_free_rx_ring:
1118 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1119 			  priv->rx_desc_cpu, priv->rx_desc_dma);
1120 
1121 out_freeirq_tx:
1122 	free_irq(priv->irq_tx, dev);
1123 
1124 out_freeirq_rx:
1125 	free_irq(priv->irq_rx, dev);
1126 
1127 out_freeirq:
1128 	free_irq(dev->irq, dev);
1129 
1130 out_phy_disconnect:
1131 	phy_disconnect(priv->phydev);
1132 
1133 	return ret;
1134 }
1135 
1136 /*
1137  * disable mac
1138  */
1139 static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1140 {
1141 	int limit;
1142 	u32 val;
1143 
1144 	val = enet_readl(priv, ENET_CTL_REG);
1145 	val |= ENET_CTL_DISABLE_MASK;
1146 	enet_writel(priv, val, ENET_CTL_REG);
1147 
1148 	limit = 1000;
1149 	do {
1150 		u32 val;
1151 
1152 		val = enet_readl(priv, ENET_CTL_REG);
1153 		if (!(val & ENET_CTL_DISABLE_MASK))
1154 			break;
1155 		udelay(1);
1156 	} while (limit--);
1157 }
1158 
1159 /*
1160  * disable dma in given channel
1161  */
1162 static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1163 {
1164 	int limit;
1165 
1166 	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1167 
1168 	limit = 1000;
1169 	do {
1170 		u32 val;
1171 
1172 		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1173 		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
1174 			break;
1175 		udelay(1);
1176 	} while (limit--);
1177 }
1178 
1179 /*
1180  * stop callback
1181  */
1182 static int bcm_enet_stop(struct net_device *dev)
1183 {
1184 	struct bcm_enet_priv *priv;
1185 	struct device *kdev;
1186 	int i;
1187 
1188 	priv = netdev_priv(dev);
1189 	kdev = &priv->pdev->dev;
1190 
1191 	netif_stop_queue(dev);
1192 	napi_disable(&priv->napi);
1193 	if (priv->has_phy)
1194 		phy_stop(priv->phydev);
1195 	del_timer_sync(&priv->rx_timeout);
1196 
1197 	/* mask all interrupts */
1198 	enet_writel(priv, 0, ENET_IRMASK_REG);
1199 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1200 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1201 
1202 	/* make sure no mib update is scheduled */
1203 	cancel_work_sync(&priv->mib_update_task);
1204 
1205 	/* disable dma & mac */
1206 	bcm_enet_disable_dma(priv, priv->tx_chan);
1207 	bcm_enet_disable_dma(priv, priv->rx_chan);
1208 	bcm_enet_disable_mac(priv);
1209 
1210 	/* force reclaim of all tx buffers */
1211 	bcm_enet_tx_reclaim(dev, 1);
1212 
1213 	/* free the rx skb ring */
1214 	for (i = 0; i < priv->rx_ring_size; i++) {
1215 		struct bcm_enet_desc *desc;
1216 
1217 		if (!priv->rx_skb[i])
1218 			continue;
1219 
1220 		desc = &priv->rx_desc_cpu[i];
1221 		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1222 				 DMA_FROM_DEVICE);
1223 		kfree_skb(priv->rx_skb[i]);
1224 	}
1225 
1226 	/* free remaining allocated memory */
1227 	kfree(priv->rx_skb);
1228 	kfree(priv->tx_skb);
1229 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1230 			  priv->rx_desc_cpu, priv->rx_desc_dma);
1231 	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1232 			  priv->tx_desc_cpu, priv->tx_desc_dma);
1233 	free_irq(priv->irq_tx, dev);
1234 	free_irq(priv->irq_rx, dev);
1235 	free_irq(dev->irq, dev);
1236 
1237 	/* release phy */
1238 	if (priv->has_phy) {
1239 		phy_disconnect(priv->phydev);
1240 		priv->phydev = NULL;
1241 	}
1242 
1243 	return 0;
1244 }
1245 
1246 /*
1247  * ethtool callbacks
1248  */
1249 struct bcm_enet_stats {
1250 	char stat_string[ETH_GSTRING_LEN];
1251 	int sizeof_stat;
1252 	int stat_offset;
1253 	int mib_reg;
1254 };
1255 
1256 #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
1257 		     offsetof(struct bcm_enet_priv, m)
1258 #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),		\
1259 		     offsetof(struct net_device_stats, m)
1260 
1261 static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1262 	{ "rx_packets", DEV_STAT(rx_packets), -1 },
1263 	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
1264 	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
1265 	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
1266 	{ "rx_errors", DEV_STAT(rx_errors), -1 },
1267 	{ "tx_errors", DEV_STAT(tx_errors), -1 },
1268 	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
1269 	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },
1270 
1271 	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1272 	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1273 	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1274 	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1275 	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1276 	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1277 	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1278 	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1279 	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1280 	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1281 	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1282 	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1283 	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1284 	{ "rx_dropped",	GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1285 	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1286 	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1287 	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1288 	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1289 	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1290 	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1291 	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1292 
1293 	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1294 	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1295 	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1296 	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1297 	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1298 	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1299 	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1300 	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1301 	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1302 	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1303 	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1304 	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1305 	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1306 	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1307 	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1308 	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1309 	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1310 	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1311 	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1312 	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1313 	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1314 	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1315 
1316 };
1317 
1318 #define BCM_ENET_STATS_LEN	\
1319 	(sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
1320 
1321 static const u32 unused_mib_regs[] = {
1322 	ETH_MIB_TX_ALL_OCTETS,
1323 	ETH_MIB_TX_ALL_PKTS,
1324 	ETH_MIB_RX_ALL_OCTETS,
1325 	ETH_MIB_RX_ALL_PKTS,
1326 };
1327 
1328 
1329 static void bcm_enet_get_drvinfo(struct net_device *netdev,
1330 				 struct ethtool_drvinfo *drvinfo)
1331 {
1332 	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
1333 	strlcpy(drvinfo->version, bcm_enet_driver_version,
1334 		sizeof(drvinfo->version));
1335 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1336 	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
1337 	drvinfo->n_stats = BCM_ENET_STATS_LEN;
1338 }
1339 
1340 static int bcm_enet_get_sset_count(struct net_device *netdev,
1341 					int string_set)
1342 {
1343 	switch (string_set) {
1344 	case ETH_SS_STATS:
1345 		return BCM_ENET_STATS_LEN;
1346 	default:
1347 		return -EINVAL;
1348 	}
1349 }
1350 
1351 static void bcm_enet_get_strings(struct net_device *netdev,
1352 				 u32 stringset, u8 *data)
1353 {
1354 	int i;
1355 
1356 	switch (stringset) {
1357 	case ETH_SS_STATS:
1358 		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1359 			memcpy(data + i * ETH_GSTRING_LEN,
1360 			       bcm_enet_gstrings_stats[i].stat_string,
1361 			       ETH_GSTRING_LEN);
1362 		}
1363 		break;
1364 	}
1365 }
1366 
1367 static void update_mib_counters(struct bcm_enet_priv *priv)
1368 {
1369 	int i;
1370 
1371 	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1372 		const struct bcm_enet_stats *s;
1373 		u32 val;
1374 		char *p;
1375 
1376 		s = &bcm_enet_gstrings_stats[i];
1377 		if (s->mib_reg == -1)
1378 			continue;
1379 
1380 		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1381 		p = (char *)priv + s->stat_offset;
1382 
1383 		if (s->sizeof_stat == sizeof(u64))
1384 			*(u64 *)p += val;
1385 		else
1386 			*(u32 *)p += val;
1387 	}
1388 
1389 	/* also empty unused mib counters to make sure mib counter
1390 	 * overflow interrupt is cleared */
1391 	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1392 		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1393 }
1394 
1395 static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1396 {
1397 	struct bcm_enet_priv *priv;
1398 
1399 	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1400 	mutex_lock(&priv->mib_update_lock);
1401 	update_mib_counters(priv);
1402 	mutex_unlock(&priv->mib_update_lock);
1403 
1404 	/* reenable mib interrupt */
1405 	if (netif_running(priv->net_dev))
1406 		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1407 }
1408 
1409 static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1410 				       struct ethtool_stats *stats,
1411 				       u64 *data)
1412 {
1413 	struct bcm_enet_priv *priv;
1414 	int i;
1415 
1416 	priv = netdev_priv(netdev);
1417 
1418 	mutex_lock(&priv->mib_update_lock);
1419 	update_mib_counters(priv);
1420 
1421 	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1422 		const struct bcm_enet_stats *s;
1423 		char *p;
1424 
1425 		s = &bcm_enet_gstrings_stats[i];
1426 		if (s->mib_reg == -1)
1427 			p = (char *)&netdev->stats;
1428 		else
1429 			p = (char *)priv;
1430 		p += s->stat_offset;
1431 		data[i] = (s->sizeof_stat == sizeof(u64)) ?
1432 			*(u64 *)p : *(u32 *)p;
1433 	}
1434 	mutex_unlock(&priv->mib_update_lock);
1435 }
1436 
1437 static int bcm_enet_nway_reset(struct net_device *dev)
1438 {
1439 	struct bcm_enet_priv *priv;
1440 
1441 	priv = netdev_priv(dev);
1442 	if (priv->has_phy) {
1443 		if (!priv->phydev)
1444 			return -ENODEV;
1445 		return genphy_restart_aneg(priv->phydev);
1446 	}
1447 
1448 	return -EOPNOTSUPP;
1449 }
1450 
1451 static int bcm_enet_get_settings(struct net_device *dev,
1452 				 struct ethtool_cmd *cmd)
1453 {
1454 	struct bcm_enet_priv *priv;
1455 
1456 	priv = netdev_priv(dev);
1457 
1458 	cmd->maxrxpkt = 0;
1459 	cmd->maxtxpkt = 0;
1460 
1461 	if (priv->has_phy) {
1462 		if (!priv->phydev)
1463 			return -ENODEV;
1464 		return phy_ethtool_gset(priv->phydev, cmd);
1465 	} else {
1466 		cmd->autoneg = 0;
1467 		ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
1468 					    ? SPEED_100 : SPEED_10));
1469 		cmd->duplex = (priv->force_duplex_full) ?
1470 			DUPLEX_FULL : DUPLEX_HALF;
1471 		cmd->supported = ADVERTISED_10baseT_Half  |
1472 			ADVERTISED_10baseT_Full |
1473 			ADVERTISED_100baseT_Half |
1474 			ADVERTISED_100baseT_Full;
1475 		cmd->advertising = 0;
1476 		cmd->port = PORT_MII;
1477 		cmd->transceiver = XCVR_EXTERNAL;
1478 	}
1479 	return 0;
1480 }
1481 
1482 static int bcm_enet_set_settings(struct net_device *dev,
1483 				 struct ethtool_cmd *cmd)
1484 {
1485 	struct bcm_enet_priv *priv;
1486 
1487 	priv = netdev_priv(dev);
1488 	if (priv->has_phy) {
1489 		if (!priv->phydev)
1490 			return -ENODEV;
1491 		return phy_ethtool_sset(priv->phydev, cmd);
1492 	} else {
1493 
1494 		if (cmd->autoneg ||
1495 		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
1496 		    cmd->port != PORT_MII)
1497 			return -EINVAL;
1498 
1499 		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
1500 		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
1501 
1502 		if (netif_running(dev))
1503 			bcm_enet_adjust_link(dev);
1504 		return 0;
1505 	}
1506 }
1507 
1508 static void bcm_enet_get_ringparam(struct net_device *dev,
1509 				   struct ethtool_ringparam *ering)
1510 {
1511 	struct bcm_enet_priv *priv;
1512 
1513 	priv = netdev_priv(dev);
1514 
1515 	/* rx/tx ring is actually only limited by memory */
1516 	ering->rx_max_pending = 8192;
1517 	ering->tx_max_pending = 8192;
1518 	ering->rx_pending = priv->rx_ring_size;
1519 	ering->tx_pending = priv->tx_ring_size;
1520 }
1521 
1522 static int bcm_enet_set_ringparam(struct net_device *dev,
1523 				  struct ethtool_ringparam *ering)
1524 {
1525 	struct bcm_enet_priv *priv;
1526 	int was_running;
1527 
1528 	priv = netdev_priv(dev);
1529 
1530 	was_running = 0;
1531 	if (netif_running(dev)) {
1532 		bcm_enet_stop(dev);
1533 		was_running = 1;
1534 	}
1535 
1536 	priv->rx_ring_size = ering->rx_pending;
1537 	priv->tx_ring_size = ering->tx_pending;
1538 
1539 	if (was_running) {
1540 		int err;
1541 
1542 		err = bcm_enet_open(dev);
1543 		if (err)
1544 			dev_close(dev);
1545 		else
1546 			bcm_enet_set_multicast_list(dev);
1547 	}
1548 	return 0;
1549 }
1550 
1551 static void bcm_enet_get_pauseparam(struct net_device *dev,
1552 				    struct ethtool_pauseparam *ecmd)
1553 {
1554 	struct bcm_enet_priv *priv;
1555 
1556 	priv = netdev_priv(dev);
1557 	ecmd->autoneg = priv->pause_auto;
1558 	ecmd->rx_pause = priv->pause_rx;
1559 	ecmd->tx_pause = priv->pause_tx;
1560 }
1561 
1562 static int bcm_enet_set_pauseparam(struct net_device *dev,
1563 				   struct ethtool_pauseparam *ecmd)
1564 {
1565 	struct bcm_enet_priv *priv;
1566 
1567 	priv = netdev_priv(dev);
1568 
1569 	if (priv->has_phy) {
1570 		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1571 			/* asymetric pause mode not supported,
1572 			 * actually possible but integrated PHY has RO
1573 			 * asym_pause bit */
1574 			return -EINVAL;
1575 		}
1576 	} else {
1577 		/* no pause autoneg on direct mii connection */
1578 		if (ecmd->autoneg)
1579 			return -EINVAL;
1580 	}
1581 
1582 	priv->pause_auto = ecmd->autoneg;
1583 	priv->pause_rx = ecmd->rx_pause;
1584 	priv->pause_tx = ecmd->tx_pause;
1585 
1586 	return 0;
1587 }
1588 
1589 static const struct ethtool_ops bcm_enet_ethtool_ops = {
1590 	.get_strings		= bcm_enet_get_strings,
1591 	.get_sset_count		= bcm_enet_get_sset_count,
1592 	.get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1593 	.nway_reset		= bcm_enet_nway_reset,
1594 	.get_settings		= bcm_enet_get_settings,
1595 	.set_settings		= bcm_enet_set_settings,
1596 	.get_drvinfo		= bcm_enet_get_drvinfo,
1597 	.get_link		= ethtool_op_get_link,
1598 	.get_ringparam		= bcm_enet_get_ringparam,
1599 	.set_ringparam		= bcm_enet_set_ringparam,
1600 	.get_pauseparam		= bcm_enet_get_pauseparam,
1601 	.set_pauseparam		= bcm_enet_set_pauseparam,
1602 };
1603 
1604 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1605 {
1606 	struct bcm_enet_priv *priv;
1607 
1608 	priv = netdev_priv(dev);
1609 	if (priv->has_phy) {
1610 		if (!priv->phydev)
1611 			return -ENODEV;
1612 		return phy_mii_ioctl(priv->phydev, rq, cmd);
1613 	} else {
1614 		struct mii_if_info mii;
1615 
1616 		mii.dev = dev;
1617 		mii.mdio_read = bcm_enet_mdio_read_mii;
1618 		mii.mdio_write = bcm_enet_mdio_write_mii;
1619 		mii.phy_id = 0;
1620 		mii.phy_id_mask = 0x3f;
1621 		mii.reg_num_mask = 0x1f;
1622 		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1623 	}
1624 }
1625 
1626 /*
1627  * calculate actual hardware mtu
1628  */
1629 static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
1630 {
1631 	int actual_mtu;
1632 
1633 	actual_mtu = mtu;
1634 
1635 	/* add ethernet header + vlan tag size */
1636 	actual_mtu += VLAN_ETH_HLEN;
1637 
1638 	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
1639 		return -EINVAL;
1640 
1641 	/*
1642 	 * setup maximum size before we get overflow mark in
1643 	 * descriptor, note that this will not prevent reception of
1644 	 * big frames, they will be split into multiple buffers
1645 	 * anyway
1646 	 */
1647 	priv->hw_mtu = actual_mtu;
1648 
1649 	/*
1650 	 * align rx buffer size to dma burst len, account FCS since
1651 	 * it's appended
1652 	 */
1653 	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1654 				  priv->dma_maxburst * 4);
1655 	return 0;
1656 }
1657 
1658 /*
1659  * adjust mtu, can't be called while device is running
1660  */
1661 static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1662 {
1663 	int ret;
1664 
1665 	if (netif_running(dev))
1666 		return -EBUSY;
1667 
1668 	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
1669 	if (ret)
1670 		return ret;
1671 	dev->mtu = new_mtu;
1672 	return 0;
1673 }
1674 
1675 /*
1676  * preinit hardware to allow mii operation while device is down
1677  */
1678 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1679 {
1680 	u32 val;
1681 	int limit;
1682 
1683 	/* make sure mac is disabled */
1684 	bcm_enet_disable_mac(priv);
1685 
1686 	/* soft reset mac */
1687 	val = ENET_CTL_SRESET_MASK;
1688 	enet_writel(priv, val, ENET_CTL_REG);
1689 	wmb();
1690 
1691 	limit = 1000;
1692 	do {
1693 		val = enet_readl(priv, ENET_CTL_REG);
1694 		if (!(val & ENET_CTL_SRESET_MASK))
1695 			break;
1696 		udelay(1);
1697 	} while (limit--);
1698 
1699 	/* select correct mii interface */
1700 	val = enet_readl(priv, ENET_CTL_REG);
1701 	if (priv->use_external_mii)
1702 		val |= ENET_CTL_EPHYSEL_MASK;
1703 	else
1704 		val &= ~ENET_CTL_EPHYSEL_MASK;
1705 	enet_writel(priv, val, ENET_CTL_REG);
1706 
1707 	/* turn on mdc clock */
1708 	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1709 		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1710 
1711 	/* set mib counters to self-clear when read */
1712 	val = enet_readl(priv, ENET_MIBCTL_REG);
1713 	val |= ENET_MIBCTL_RDCLEAR_MASK;
1714 	enet_writel(priv, val, ENET_MIBCTL_REG);
1715 }
1716 
1717 static const struct net_device_ops bcm_enet_ops = {
1718 	.ndo_open		= bcm_enet_open,
1719 	.ndo_stop		= bcm_enet_stop,
1720 	.ndo_start_xmit		= bcm_enet_start_xmit,
1721 	.ndo_set_mac_address	= bcm_enet_set_mac_address,
1722 	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
1723 	.ndo_do_ioctl		= bcm_enet_ioctl,
1724 	.ndo_change_mtu		= bcm_enet_change_mtu,
1725 #ifdef CONFIG_NET_POLL_CONTROLLER
1726 	.ndo_poll_controller = bcm_enet_netpoll,
1727 #endif
1728 };
1729 
1730 /*
1731  * allocate netdevice, request register memory and register device.
1732  */
1733 static int bcm_enet_probe(struct platform_device *pdev)
1734 {
1735 	struct bcm_enet_priv *priv;
1736 	struct net_device *dev;
1737 	struct bcm63xx_enet_platform_data *pd;
1738 	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1739 	struct mii_bus *bus;
1740 	const char *clk_name;
1741 	int i, ret;
1742 
1743 	/* stop if shared driver failed, assume driver->probe will be
1744 	 * called in the same order we register devices (correct ?) */
1745 	if (!bcm_enet_shared_base[0])
1746 		return -ENODEV;
1747 
1748 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1749 	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1750 	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1751 	if (!res_irq || !res_irq_rx || !res_irq_tx)
1752 		return -ENODEV;
1753 
1754 	ret = 0;
1755 	dev = alloc_etherdev(sizeof(*priv));
1756 	if (!dev)
1757 		return -ENOMEM;
1758 	priv = netdev_priv(dev);
1759 
1760 	priv->enet_is_sw = false;
1761 	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1762 
1763 	ret = compute_hw_mtu(priv, dev->mtu);
1764 	if (ret)
1765 		goto out;
1766 
1767 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1768 	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
1769 	if (IS_ERR(priv->base)) {
1770 		ret = PTR_ERR(priv->base);
1771 		goto out;
1772 	}
1773 
1774 	dev->irq = priv->irq = res_irq->start;
1775 	priv->irq_rx = res_irq_rx->start;
1776 	priv->irq_tx = res_irq_tx->start;
1777 	priv->mac_id = pdev->id;
1778 
1779 	/* get rx & tx dma channel id for this mac */
1780 	if (priv->mac_id == 0) {
1781 		priv->rx_chan = 0;
1782 		priv->tx_chan = 1;
1783 		clk_name = "enet0";
1784 	} else {
1785 		priv->rx_chan = 2;
1786 		priv->tx_chan = 3;
1787 		clk_name = "enet1";
1788 	}
1789 
1790 	priv->mac_clk = clk_get(&pdev->dev, clk_name);
1791 	if (IS_ERR(priv->mac_clk)) {
1792 		ret = PTR_ERR(priv->mac_clk);
1793 		goto out;
1794 	}
1795 	clk_prepare_enable(priv->mac_clk);
1796 
1797 	/* initialize default and fetch platform data */
1798 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1799 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1800 
1801 	pd = dev_get_platdata(&pdev->dev);
1802 	if (pd) {
1803 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1804 		priv->has_phy = pd->has_phy;
1805 		priv->phy_id = pd->phy_id;
1806 		priv->has_phy_interrupt = pd->has_phy_interrupt;
1807 		priv->phy_interrupt = pd->phy_interrupt;
1808 		priv->use_external_mii = !pd->use_internal_phy;
1809 		priv->pause_auto = pd->pause_auto;
1810 		priv->pause_rx = pd->pause_rx;
1811 		priv->pause_tx = pd->pause_tx;
1812 		priv->force_duplex_full = pd->force_duplex_full;
1813 		priv->force_speed_100 = pd->force_speed_100;
1814 		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1815 		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1816 		priv->dma_chan_width = pd->dma_chan_width;
1817 		priv->dma_has_sram = pd->dma_has_sram;
1818 		priv->dma_desc_shift = pd->dma_desc_shift;
1819 	}
1820 
1821 	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1822 		/* using internal PHY, enable clock */
1823 		priv->phy_clk = clk_get(&pdev->dev, "ephy");
1824 		if (IS_ERR(priv->phy_clk)) {
1825 			ret = PTR_ERR(priv->phy_clk);
1826 			priv->phy_clk = NULL;
1827 			goto out_put_clk_mac;
1828 		}
1829 		clk_prepare_enable(priv->phy_clk);
1830 	}
1831 
1832 	/* do minimal hardware init to be able to probe mii bus */
1833 	bcm_enet_hw_preinit(priv);
1834 
1835 	/* MII bus registration */
1836 	if (priv->has_phy) {
1837 
1838 		priv->mii_bus = mdiobus_alloc();
1839 		if (!priv->mii_bus) {
1840 			ret = -ENOMEM;
1841 			goto out_uninit_hw;
1842 		}
1843 
1844 		bus = priv->mii_bus;
1845 		bus->name = "bcm63xx_enet MII bus";
1846 		bus->parent = &pdev->dev;
1847 		bus->priv = priv;
1848 		bus->read = bcm_enet_mdio_read_phylib;
1849 		bus->write = bcm_enet_mdio_write_phylib;
1850 		sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
1851 
1852 		/* only probe bus where we think the PHY is, because
1853 		 * the mdio read operation return 0 instead of 0xffff
1854 		 * if a slave is not present on hw */
1855 		bus->phy_mask = ~(1 << priv->phy_id);
1856 
1857 		bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
1858 					GFP_KERNEL);
1859 		if (!bus->irq) {
1860 			ret = -ENOMEM;
1861 			goto out_free_mdio;
1862 		}
1863 
1864 		if (priv->has_phy_interrupt)
1865 			bus->irq[priv->phy_id] = priv->phy_interrupt;
1866 		else
1867 			bus->irq[priv->phy_id] = PHY_POLL;
1868 
1869 		ret = mdiobus_register(bus);
1870 		if (ret) {
1871 			dev_err(&pdev->dev, "unable to register mdio bus\n");
1872 			goto out_free_mdio;
1873 		}
1874 	} else {
1875 
1876 		/* run platform code to initialize PHY device */
1877 		if (pd->mii_config &&
1878 		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1879 				   bcm_enet_mdio_write_mii)) {
1880 			dev_err(&pdev->dev, "unable to configure mdio bus\n");
1881 			goto out_uninit_hw;
1882 		}
1883 	}
1884 
1885 	spin_lock_init(&priv->rx_lock);
1886 
1887 	/* init rx timeout (used for oom) */
1888 	init_timer(&priv->rx_timeout);
1889 	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1890 	priv->rx_timeout.data = (unsigned long)dev;
1891 
1892 	/* init the mib update lock&work */
1893 	mutex_init(&priv->mib_update_lock);
1894 	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1895 
1896 	/* zero mib counters */
1897 	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1898 		enet_writel(priv, 0, ENET_MIB_REG(i));
1899 
1900 	/* register netdevice */
1901 	dev->netdev_ops = &bcm_enet_ops;
1902 	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1903 
1904 	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1905 	SET_NETDEV_DEV(dev, &pdev->dev);
1906 
1907 	ret = register_netdev(dev);
1908 	if (ret)
1909 		goto out_unregister_mdio;
1910 
1911 	netif_carrier_off(dev);
1912 	platform_set_drvdata(pdev, dev);
1913 	priv->pdev = pdev;
1914 	priv->net_dev = dev;
1915 
1916 	return 0;
1917 
1918 out_unregister_mdio:
1919 	if (priv->mii_bus)
1920 		mdiobus_unregister(priv->mii_bus);
1921 
1922 out_free_mdio:
1923 	if (priv->mii_bus)
1924 		mdiobus_free(priv->mii_bus);
1925 
1926 out_uninit_hw:
1927 	/* turn off mdc clock */
1928 	enet_writel(priv, 0, ENET_MIISC_REG);
1929 	if (priv->phy_clk) {
1930 		clk_disable_unprepare(priv->phy_clk);
1931 		clk_put(priv->phy_clk);
1932 	}
1933 
1934 out_put_clk_mac:
1935 	clk_disable_unprepare(priv->mac_clk);
1936 	clk_put(priv->mac_clk);
1937 out:
1938 	free_netdev(dev);
1939 	return ret;
1940 }
1941 
1942 
1943 /*
1944  * exit func, stops hardware and unregisters netdevice
1945  */
1946 static int bcm_enet_remove(struct platform_device *pdev)
1947 {
1948 	struct bcm_enet_priv *priv;
1949 	struct net_device *dev;
1950 
1951 	/* stop netdevice */
1952 	dev = platform_get_drvdata(pdev);
1953 	priv = netdev_priv(dev);
1954 	unregister_netdev(dev);
1955 
1956 	/* turn off mdc clock */
1957 	enet_writel(priv, 0, ENET_MIISC_REG);
1958 
1959 	if (priv->has_phy) {
1960 		mdiobus_unregister(priv->mii_bus);
1961 		mdiobus_free(priv->mii_bus);
1962 	} else {
1963 		struct bcm63xx_enet_platform_data *pd;
1964 
1965 		pd = dev_get_platdata(&pdev->dev);
1966 		if (pd && pd->mii_config)
1967 			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1968 				       bcm_enet_mdio_write_mii);
1969 	}
1970 
1971 	/* disable hw block clocks */
1972 	if (priv->phy_clk) {
1973 		clk_disable_unprepare(priv->phy_clk);
1974 		clk_put(priv->phy_clk);
1975 	}
1976 	clk_disable_unprepare(priv->mac_clk);
1977 	clk_put(priv->mac_clk);
1978 
1979 	free_netdev(dev);
1980 	return 0;
1981 }
1982 
1983 struct platform_driver bcm63xx_enet_driver = {
1984 	.probe	= bcm_enet_probe,
1985 	.remove	= bcm_enet_remove,
1986 	.driver	= {
1987 		.name	= "bcm63xx_enet",
1988 		.owner  = THIS_MODULE,
1989 	},
1990 };
1991 
1992 /*
1993  * switch mii access callbacks
1994  */
1995 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1996 				int ext, int phy_id, int location)
1997 {
1998 	u32 reg;
1999 	int ret;
2000 
2001 	spin_lock_bh(&priv->enetsw_mdio_lock);
2002 	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
2003 
2004 	reg = ENETSW_MDIOC_RD_MASK |
2005 		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
2006 		(location << ENETSW_MDIOC_REG_SHIFT);
2007 
2008 	if (ext)
2009 		reg |= ENETSW_MDIOC_EXT_MASK;
2010 
2011 	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2012 	udelay(50);
2013 	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
2014 	spin_unlock_bh(&priv->enetsw_mdio_lock);
2015 	return ret;
2016 }
2017 
2018 static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
2019 				 int ext, int phy_id, int location,
2020 				 uint16_t data)
2021 {
2022 	u32 reg;
2023 
2024 	spin_lock_bh(&priv->enetsw_mdio_lock);
2025 	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
2026 
2027 	reg = ENETSW_MDIOC_WR_MASK |
2028 		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
2029 		(location << ENETSW_MDIOC_REG_SHIFT);
2030 
2031 	if (ext)
2032 		reg |= ENETSW_MDIOC_EXT_MASK;
2033 
2034 	reg |= data;
2035 
2036 	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2037 	udelay(50);
2038 	spin_unlock_bh(&priv->enetsw_mdio_lock);
2039 }
2040 
2041 static inline int bcm_enet_port_is_rgmii(int portid)
2042 {
2043 	return portid >= ENETSW_RGMII_PORT0;
2044 }
2045 
2046 /*
2047  * enet sw PHY polling
2048  */
2049 static void swphy_poll_timer(unsigned long data)
2050 {
2051 	struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
2052 	unsigned int i;
2053 
2054 	for (i = 0; i < priv->num_ports; i++) {
2055 		struct bcm63xx_enetsw_port *port;
2056 		int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
2057 		int external_phy = bcm_enet_port_is_rgmii(i);
2058 		u8 override;
2059 
2060 		port = &priv->used_ports[i];
2061 		if (!port->used)
2062 			continue;
2063 
2064 		if (port->bypass_link)
2065 			continue;
2066 
2067 		/* dummy read to clear */
2068 		for (j = 0; j < 2; j++)
2069 			val = bcmenet_sw_mdio_read(priv, external_phy,
2070 						   port->phy_id, MII_BMSR);
2071 
2072 		if (val == 0xffff)
2073 			continue;
2074 
2075 		up = (val & BMSR_LSTATUS) ? 1 : 0;
2076 		if (!(up ^ priv->sw_port_link[i]))
2077 			continue;
2078 
2079 		priv->sw_port_link[i] = up;
2080 
2081 		/* link changed */
2082 		if (!up) {
2083 			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2084 				 port->name);
2085 			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2086 				      ENETSW_PORTOV_REG(i));
2087 			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2088 				      ENETSW_PTCTRL_TXDIS_MASK,
2089 				      ENETSW_PTCTRL_REG(i));
2090 			continue;
2091 		}
2092 
2093 		advertise = bcmenet_sw_mdio_read(priv, external_phy,
2094 						 port->phy_id, MII_ADVERTISE);
2095 
2096 		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2097 					   MII_LPA);
2098 
2099 		lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2100 					    MII_STAT1000);
2101 
2102 		/* figure out media and duplex from advertise and LPA values */
2103 		media = mii_nway_result(lpa & advertise);
2104 		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2105 		if (lpa2 & LPA_1000FULL)
2106 			duplex = 1;
2107 
2108 		if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
2109 			speed = 1000;
2110 		else {
2111 			if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2112 				speed = 100;
2113 			else
2114 				speed = 10;
2115 		}
2116 
2117 		dev_info(&priv->pdev->dev,
2118 			 "link UP on %s, %dMbps, %s-duplex\n",
2119 			 port->name, speed, duplex ? "full" : "half");
2120 
2121 		override = ENETSW_PORTOV_ENABLE_MASK |
2122 			ENETSW_PORTOV_LINKUP_MASK;
2123 
2124 		if (speed == 1000)
2125 			override |= ENETSW_IMPOV_1000_MASK;
2126 		else if (speed == 100)
2127 			override |= ENETSW_IMPOV_100_MASK;
2128 		if (duplex)
2129 			override |= ENETSW_IMPOV_FDX_MASK;
2130 
2131 		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2132 		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2133 	}
2134 
2135 	priv->swphy_poll.expires = jiffies + HZ;
2136 	add_timer(&priv->swphy_poll);
2137 }
2138 
2139 /*
2140  * open callback, allocate dma rings & buffers and start rx operation
2141  */
2142 static int bcm_enetsw_open(struct net_device *dev)
2143 {
2144 	struct bcm_enet_priv *priv;
2145 	struct device *kdev;
2146 	int i, ret;
2147 	unsigned int size;
2148 	void *p;
2149 	u32 val;
2150 
2151 	priv = netdev_priv(dev);
2152 	kdev = &priv->pdev->dev;
2153 
2154 	/* mask all interrupts and request them */
2155 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2156 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2157 
2158 	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2159 			  0, dev->name, dev);
2160 	if (ret)
2161 		goto out_freeirq;
2162 
2163 	if (priv->irq_tx != -1) {
2164 		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2165 				  0, dev->name, dev);
2166 		if (ret)
2167 			goto out_freeirq_rx;
2168 	}
2169 
2170 	/* allocate rx dma ring */
2171 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2172 	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2173 	if (!p) {
2174 		dev_err(kdev, "cannot allocate rx ring %u\n", size);
2175 		ret = -ENOMEM;
2176 		goto out_freeirq_tx;
2177 	}
2178 
2179 	memset(p, 0, size);
2180 	priv->rx_desc_alloc_size = size;
2181 	priv->rx_desc_cpu = p;
2182 
2183 	/* allocate tx dma ring */
2184 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2185 	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2186 	if (!p) {
2187 		dev_err(kdev, "cannot allocate tx ring\n");
2188 		ret = -ENOMEM;
2189 		goto out_free_rx_ring;
2190 	}
2191 
2192 	memset(p, 0, size);
2193 	priv->tx_desc_alloc_size = size;
2194 	priv->tx_desc_cpu = p;
2195 
2196 	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
2197 			       GFP_KERNEL);
2198 	if (!priv->tx_skb) {
2199 		dev_err(kdev, "cannot allocate rx skb queue\n");
2200 		ret = -ENOMEM;
2201 		goto out_free_tx_ring;
2202 	}
2203 
2204 	priv->tx_desc_count = priv->tx_ring_size;
2205 	priv->tx_dirty_desc = 0;
2206 	priv->tx_curr_desc = 0;
2207 	spin_lock_init(&priv->tx_lock);
2208 
2209 	/* init & fill rx ring with skbs */
2210 	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
2211 			       GFP_KERNEL);
2212 	if (!priv->rx_skb) {
2213 		dev_err(kdev, "cannot allocate rx skb queue\n");
2214 		ret = -ENOMEM;
2215 		goto out_free_tx_skb;
2216 	}
2217 
2218 	priv->rx_desc_count = 0;
2219 	priv->rx_dirty_desc = 0;
2220 	priv->rx_curr_desc = 0;
2221 
2222 	/* disable all ports */
2223 	for (i = 0; i < priv->num_ports; i++) {
2224 		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2225 			      ENETSW_PORTOV_REG(i));
2226 		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2227 			      ENETSW_PTCTRL_TXDIS_MASK,
2228 			      ENETSW_PTCTRL_REG(i));
2229 
2230 		priv->sw_port_link[i] = 0;
2231 	}
2232 
2233 	/* reset mib */
2234 	val = enetsw_readb(priv, ENETSW_GMCR_REG);
2235 	val |= ENETSW_GMCR_RST_MIB_MASK;
2236 	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2237 	mdelay(1);
2238 	val &= ~ENETSW_GMCR_RST_MIB_MASK;
2239 	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2240 	mdelay(1);
2241 
2242 	/* force CPU port state */
2243 	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2244 	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2245 	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2246 
2247 	/* enable switch forward engine */
2248 	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2249 	val |= ENETSW_SWMODE_FWD_EN_MASK;
2250 	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2251 
2252 	/* enable jumbo on all ports */
2253 	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2254 	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2255 
2256 	/* initialize flow control buffer allocation */
2257 	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2258 			ENETDMA_BUFALLOC_REG(priv->rx_chan));
2259 
2260 	if (bcm_enet_refill_rx(dev)) {
2261 		dev_err(kdev, "cannot allocate rx skb queue\n");
2262 		ret = -ENOMEM;
2263 		goto out;
2264 	}
2265 
2266 	/* write rx & tx ring addresses */
2267 	enet_dmas_writel(priv, priv->rx_desc_dma,
2268 			 ENETDMAS_RSTART_REG, priv->rx_chan);
2269 	enet_dmas_writel(priv, priv->tx_desc_dma,
2270 			 ENETDMAS_RSTART_REG, priv->tx_chan);
2271 
2272 	/* clear remaining state ram for rx & tx channel */
2273 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2274 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2275 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2276 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2277 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2278 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2279 
2280 	/* set dma maximum burst len */
2281 	enet_dmac_writel(priv, priv->dma_maxburst,
2282 			 ENETDMAC_MAXBURST, priv->rx_chan);
2283 	enet_dmac_writel(priv, priv->dma_maxburst,
2284 			 ENETDMAC_MAXBURST, priv->tx_chan);
2285 
2286 	/* set flow control low/high threshold to 1/3 / 2/3 */
2287 	val = priv->rx_ring_size / 3;
2288 	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2289 	val = (priv->rx_ring_size * 2) / 3;
2290 	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2291 
2292 	/* all set, enable mac and interrupts, start dma engine and
2293 	 * kick rx dma channel
2294 	 */
2295 	wmb();
2296 	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2297 	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2298 			 ENETDMAC_CHANCFG, priv->rx_chan);
2299 
2300 	/* watch "packet transferred" interrupt in rx and tx */
2301 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2302 			 ENETDMAC_IR, priv->rx_chan);
2303 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2304 			 ENETDMAC_IR, priv->tx_chan);
2305 
2306 	/* make sure we enable napi before rx interrupt  */
2307 	napi_enable(&priv->napi);
2308 
2309 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2310 			 ENETDMAC_IRMASK, priv->rx_chan);
2311 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2312 			 ENETDMAC_IRMASK, priv->tx_chan);
2313 
2314 	netif_carrier_on(dev);
2315 	netif_start_queue(dev);
2316 
2317 	/* apply override config for bypass_link ports here. */
2318 	for (i = 0; i < priv->num_ports; i++) {
2319 		struct bcm63xx_enetsw_port *port;
2320 		u8 override;
2321 		port = &priv->used_ports[i];
2322 		if (!port->used)
2323 			continue;
2324 
2325 		if (!port->bypass_link)
2326 			continue;
2327 
2328 		override = ENETSW_PORTOV_ENABLE_MASK |
2329 			ENETSW_PORTOV_LINKUP_MASK;
2330 
2331 		switch (port->force_speed) {
2332 		case 1000:
2333 			override |= ENETSW_IMPOV_1000_MASK;
2334 			break;
2335 		case 100:
2336 			override |= ENETSW_IMPOV_100_MASK;
2337 			break;
2338 		case 10:
2339 			break;
2340 		default:
2341 			pr_warn("invalid forced speed on port %s: assume 10\n",
2342 			       port->name);
2343 			break;
2344 		}
2345 
2346 		if (port->force_duplex_full)
2347 			override |= ENETSW_IMPOV_FDX_MASK;
2348 
2349 
2350 		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2351 		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2352 	}
2353 
2354 	/* start phy polling timer */
2355 	init_timer(&priv->swphy_poll);
2356 	priv->swphy_poll.function = swphy_poll_timer;
2357 	priv->swphy_poll.data = (unsigned long)priv;
2358 	priv->swphy_poll.expires = jiffies;
2359 	add_timer(&priv->swphy_poll);
2360 	return 0;
2361 
2362 out:
2363 	for (i = 0; i < priv->rx_ring_size; i++) {
2364 		struct bcm_enet_desc *desc;
2365 
2366 		if (!priv->rx_skb[i])
2367 			continue;
2368 
2369 		desc = &priv->rx_desc_cpu[i];
2370 		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2371 				 DMA_FROM_DEVICE);
2372 		kfree_skb(priv->rx_skb[i]);
2373 	}
2374 	kfree(priv->rx_skb);
2375 
2376 out_free_tx_skb:
2377 	kfree(priv->tx_skb);
2378 
2379 out_free_tx_ring:
2380 	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2381 			  priv->tx_desc_cpu, priv->tx_desc_dma);
2382 
2383 out_free_rx_ring:
2384 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2385 			  priv->rx_desc_cpu, priv->rx_desc_dma);
2386 
2387 out_freeirq_tx:
2388 	if (priv->irq_tx != -1)
2389 		free_irq(priv->irq_tx, dev);
2390 
2391 out_freeirq_rx:
2392 	free_irq(priv->irq_rx, dev);
2393 
2394 out_freeirq:
2395 	return ret;
2396 }
2397 
2398 /* stop callback */
2399 static int bcm_enetsw_stop(struct net_device *dev)
2400 {
2401 	struct bcm_enet_priv *priv;
2402 	struct device *kdev;
2403 	int i;
2404 
2405 	priv = netdev_priv(dev);
2406 	kdev = &priv->pdev->dev;
2407 
2408 	del_timer_sync(&priv->swphy_poll);
2409 	netif_stop_queue(dev);
2410 	napi_disable(&priv->napi);
2411 	del_timer_sync(&priv->rx_timeout);
2412 
2413 	/* mask all interrupts */
2414 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2415 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2416 
2417 	/* disable dma & mac */
2418 	bcm_enet_disable_dma(priv, priv->tx_chan);
2419 	bcm_enet_disable_dma(priv, priv->rx_chan);
2420 
2421 	/* force reclaim of all tx buffers */
2422 	bcm_enet_tx_reclaim(dev, 1);
2423 
2424 	/* free the rx skb ring */
2425 	for (i = 0; i < priv->rx_ring_size; i++) {
2426 		struct bcm_enet_desc *desc;
2427 
2428 		if (!priv->rx_skb[i])
2429 			continue;
2430 
2431 		desc = &priv->rx_desc_cpu[i];
2432 		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2433 				 DMA_FROM_DEVICE);
2434 		kfree_skb(priv->rx_skb[i]);
2435 	}
2436 
2437 	/* free remaining allocated memory */
2438 	kfree(priv->rx_skb);
2439 	kfree(priv->tx_skb);
2440 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2441 			  priv->rx_desc_cpu, priv->rx_desc_dma);
2442 	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2443 			  priv->tx_desc_cpu, priv->tx_desc_dma);
2444 	if (priv->irq_tx != -1)
2445 		free_irq(priv->irq_tx, dev);
2446 	free_irq(priv->irq_rx, dev);
2447 
2448 	return 0;
2449 }
2450 
2451 /* try to sort out phy external status by walking the used_port field
2452  * in the bcm_enet_priv structure. in case the phy address is not
2453  * assigned to any physical port on the switch, assume it is external
2454  * (and yell at the user).
2455  */
2456 static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2457 {
2458 	int i;
2459 
2460 	for (i = 0; i < priv->num_ports; ++i) {
2461 		if (!priv->used_ports[i].used)
2462 			continue;
2463 		if (priv->used_ports[i].phy_id == phy_id)
2464 			return bcm_enet_port_is_rgmii(i);
2465 	}
2466 
2467 	printk_once(KERN_WARNING  "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2468 		    phy_id);
2469 	return 1;
2470 }
2471 
2472 /* can't use bcmenet_sw_mdio_read directly as we need to sort out
2473  * external/internal status of the given phy_id first.
2474  */
2475 static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2476 				    int location)
2477 {
2478 	struct bcm_enet_priv *priv;
2479 
2480 	priv = netdev_priv(dev);
2481 	return bcmenet_sw_mdio_read(priv,
2482 				    bcm_enetsw_phy_is_external(priv, phy_id),
2483 				    phy_id, location);
2484 }
2485 
2486 /* can't use bcmenet_sw_mdio_write directly as we need to sort out
2487  * external/internal status of the given phy_id first.
2488  */
2489 static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2490 				      int location,
2491 				      int val)
2492 {
2493 	struct bcm_enet_priv *priv;
2494 
2495 	priv = netdev_priv(dev);
2496 	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2497 			      phy_id, location, val);
2498 }
2499 
2500 static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2501 {
2502 	struct mii_if_info mii;
2503 
2504 	mii.dev = dev;
2505 	mii.mdio_read = bcm_enetsw_mii_mdio_read;
2506 	mii.mdio_write = bcm_enetsw_mii_mdio_write;
2507 	mii.phy_id = 0;
2508 	mii.phy_id_mask = 0x3f;
2509 	mii.reg_num_mask = 0x1f;
2510 	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2511 
2512 }
2513 
2514 static const struct net_device_ops bcm_enetsw_ops = {
2515 	.ndo_open		= bcm_enetsw_open,
2516 	.ndo_stop		= bcm_enetsw_stop,
2517 	.ndo_start_xmit		= bcm_enet_start_xmit,
2518 	.ndo_change_mtu		= bcm_enet_change_mtu,
2519 	.ndo_do_ioctl		= bcm_enetsw_ioctl,
2520 };
2521 
2522 
2523 static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2524 	{ "rx_packets", DEV_STAT(rx_packets), -1 },
2525 	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
2526 	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
2527 	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
2528 	{ "rx_errors", DEV_STAT(rx_errors), -1 },
2529 	{ "tx_errors", DEV_STAT(tx_errors), -1 },
2530 	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
2531 	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },
2532 
2533 	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2534 	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2535 	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2536 	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2537 	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2538 	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2539 	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2540 	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2541 	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2542 	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2543 	  ETHSW_MIB_RX_1024_1522 },
2544 	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2545 	  ETHSW_MIB_RX_1523_2047 },
2546 	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2547 	  ETHSW_MIB_RX_2048_4095 },
2548 	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2549 	  ETHSW_MIB_RX_4096_8191 },
2550 	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2551 	  ETHSW_MIB_RX_8192_9728 },
2552 	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2553 	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2554 	{ "tx_dropped",	GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2555 	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2556 	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2557 
2558 	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2559 	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2560 	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2561 	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2562 	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2563 	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2564 
2565 };
2566 
2567 #define BCM_ENETSW_STATS_LEN	\
2568 	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
2569 
2570 static void bcm_enetsw_get_strings(struct net_device *netdev,
2571 				   u32 stringset, u8 *data)
2572 {
2573 	int i;
2574 
2575 	switch (stringset) {
2576 	case ETH_SS_STATS:
2577 		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2578 			memcpy(data + i * ETH_GSTRING_LEN,
2579 			       bcm_enetsw_gstrings_stats[i].stat_string,
2580 			       ETH_GSTRING_LEN);
2581 		}
2582 		break;
2583 	}
2584 }
2585 
2586 static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2587 				     int string_set)
2588 {
2589 	switch (string_set) {
2590 	case ETH_SS_STATS:
2591 		return BCM_ENETSW_STATS_LEN;
2592 	default:
2593 		return -EINVAL;
2594 	}
2595 }
2596 
2597 static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2598 				   struct ethtool_drvinfo *drvinfo)
2599 {
2600 	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
2601 	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
2602 	strncpy(drvinfo->fw_version, "N/A", 32);
2603 	strncpy(drvinfo->bus_info, "bcm63xx", 32);
2604 	drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
2605 }
2606 
2607 static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2608 					 struct ethtool_stats *stats,
2609 					 u64 *data)
2610 {
2611 	struct bcm_enet_priv *priv;
2612 	int i;
2613 
2614 	priv = netdev_priv(netdev);
2615 
2616 	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2617 		const struct bcm_enet_stats *s;
2618 		u32 lo, hi;
2619 		char *p;
2620 		int reg;
2621 
2622 		s = &bcm_enetsw_gstrings_stats[i];
2623 
2624 		reg = s->mib_reg;
2625 		if (reg == -1)
2626 			continue;
2627 
2628 		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2629 		p = (char *)priv + s->stat_offset;
2630 
2631 		if (s->sizeof_stat == sizeof(u64)) {
2632 			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2633 			*(u64 *)p = ((u64)hi << 32 | lo);
2634 		} else {
2635 			*(u32 *)p = lo;
2636 		}
2637 	}
2638 
2639 	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2640 		const struct bcm_enet_stats *s;
2641 		char *p;
2642 
2643 		s = &bcm_enetsw_gstrings_stats[i];
2644 
2645 		if (s->mib_reg == -1)
2646 			p = (char *)&netdev->stats + s->stat_offset;
2647 		else
2648 			p = (char *)priv + s->stat_offset;
2649 
2650 		data[i] = (s->sizeof_stat == sizeof(u64)) ?
2651 			*(u64 *)p : *(u32 *)p;
2652 	}
2653 }
2654 
2655 static void bcm_enetsw_get_ringparam(struct net_device *dev,
2656 				     struct ethtool_ringparam *ering)
2657 {
2658 	struct bcm_enet_priv *priv;
2659 
2660 	priv = netdev_priv(dev);
2661 
2662 	/* rx/tx ring is actually only limited by memory */
2663 	ering->rx_max_pending = 8192;
2664 	ering->tx_max_pending = 8192;
2665 	ering->rx_mini_max_pending = 0;
2666 	ering->rx_jumbo_max_pending = 0;
2667 	ering->rx_pending = priv->rx_ring_size;
2668 	ering->tx_pending = priv->tx_ring_size;
2669 }
2670 
2671 static int bcm_enetsw_set_ringparam(struct net_device *dev,
2672 				    struct ethtool_ringparam *ering)
2673 {
2674 	struct bcm_enet_priv *priv;
2675 	int was_running;
2676 
2677 	priv = netdev_priv(dev);
2678 
2679 	was_running = 0;
2680 	if (netif_running(dev)) {
2681 		bcm_enetsw_stop(dev);
2682 		was_running = 1;
2683 	}
2684 
2685 	priv->rx_ring_size = ering->rx_pending;
2686 	priv->tx_ring_size = ering->tx_pending;
2687 
2688 	if (was_running) {
2689 		int err;
2690 
2691 		err = bcm_enetsw_open(dev);
2692 		if (err)
2693 			dev_close(dev);
2694 	}
2695 	return 0;
2696 }
2697 
2698 static struct ethtool_ops bcm_enetsw_ethtool_ops = {
2699 	.get_strings		= bcm_enetsw_get_strings,
2700 	.get_sset_count		= bcm_enetsw_get_sset_count,
2701 	.get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
2702 	.get_drvinfo		= bcm_enetsw_get_drvinfo,
2703 	.get_ringparam		= bcm_enetsw_get_ringparam,
2704 	.set_ringparam		= bcm_enetsw_set_ringparam,
2705 };
2706 
2707 /* allocate netdevice, request register memory and register device. */
2708 static int bcm_enetsw_probe(struct platform_device *pdev)
2709 {
2710 	struct bcm_enet_priv *priv;
2711 	struct net_device *dev;
2712 	struct bcm63xx_enetsw_platform_data *pd;
2713 	struct resource *res_mem;
2714 	int ret, irq_rx, irq_tx;
2715 
2716 	/* stop if shared driver failed, assume driver->probe will be
2717 	 * called in the same order we register devices (correct ?)
2718 	 */
2719 	if (!bcm_enet_shared_base[0])
2720 		return -ENODEV;
2721 
2722 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2723 	irq_rx = platform_get_irq(pdev, 0);
2724 	irq_tx = platform_get_irq(pdev, 1);
2725 	if (!res_mem || irq_rx < 0)
2726 		return -ENODEV;
2727 
2728 	ret = 0;
2729 	dev = alloc_etherdev(sizeof(*priv));
2730 	if (!dev)
2731 		return -ENOMEM;
2732 	priv = netdev_priv(dev);
2733 	memset(priv, 0, sizeof(*priv));
2734 
2735 	/* initialize default and fetch platform data */
2736 	priv->enet_is_sw = true;
2737 	priv->irq_rx = irq_rx;
2738 	priv->irq_tx = irq_tx;
2739 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2740 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2741 	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2742 
2743 	pd = dev_get_platdata(&pdev->dev);
2744 	if (pd) {
2745 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2746 		memcpy(priv->used_ports, pd->used_ports,
2747 		       sizeof(pd->used_ports));
2748 		priv->num_ports = pd->num_ports;
2749 		priv->dma_has_sram = pd->dma_has_sram;
2750 		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2751 		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2752 		priv->dma_chan_width = pd->dma_chan_width;
2753 	}
2754 
2755 	ret = compute_hw_mtu(priv, dev->mtu);
2756 	if (ret)
2757 		goto out;
2758 
2759 	if (!request_mem_region(res_mem->start, resource_size(res_mem),
2760 				"bcm63xx_enetsw")) {
2761 		ret = -EBUSY;
2762 		goto out;
2763 	}
2764 
2765 	priv->base = ioremap(res_mem->start, resource_size(res_mem));
2766 	if (priv->base == NULL) {
2767 		ret = -ENOMEM;
2768 		goto out_release_mem;
2769 	}
2770 
2771 	priv->mac_clk = clk_get(&pdev->dev, "enetsw");
2772 	if (IS_ERR(priv->mac_clk)) {
2773 		ret = PTR_ERR(priv->mac_clk);
2774 		goto out_unmap;
2775 	}
2776 	clk_enable(priv->mac_clk);
2777 
2778 	priv->rx_chan = 0;
2779 	priv->tx_chan = 1;
2780 	spin_lock_init(&priv->rx_lock);
2781 
2782 	/* init rx timeout (used for oom) */
2783 	init_timer(&priv->rx_timeout);
2784 	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
2785 	priv->rx_timeout.data = (unsigned long)dev;
2786 
2787 	/* register netdevice */
2788 	dev->netdev_ops = &bcm_enetsw_ops;
2789 	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2790 	SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
2791 	SET_NETDEV_DEV(dev, &pdev->dev);
2792 
2793 	spin_lock_init(&priv->enetsw_mdio_lock);
2794 
2795 	ret = register_netdev(dev);
2796 	if (ret)
2797 		goto out_put_clk;
2798 
2799 	netif_carrier_off(dev);
2800 	platform_set_drvdata(pdev, dev);
2801 	priv->pdev = pdev;
2802 	priv->net_dev = dev;
2803 
2804 	return 0;
2805 
2806 out_put_clk:
2807 	clk_put(priv->mac_clk);
2808 
2809 out_unmap:
2810 	iounmap(priv->base);
2811 
2812 out_release_mem:
2813 	release_mem_region(res_mem->start, resource_size(res_mem));
2814 out:
2815 	free_netdev(dev);
2816 	return ret;
2817 }
2818 
2819 
2820 /* exit func, stops hardware and unregisters netdevice */
2821 static int bcm_enetsw_remove(struct platform_device *pdev)
2822 {
2823 	struct bcm_enet_priv *priv;
2824 	struct net_device *dev;
2825 	struct resource *res;
2826 
2827 	/* stop netdevice */
2828 	dev = platform_get_drvdata(pdev);
2829 	priv = netdev_priv(dev);
2830 	unregister_netdev(dev);
2831 
2832 	/* release device resources */
2833 	iounmap(priv->base);
2834 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2835 	release_mem_region(res->start, resource_size(res));
2836 
2837 	free_netdev(dev);
2838 	return 0;
2839 }
2840 
2841 struct platform_driver bcm63xx_enetsw_driver = {
2842 	.probe	= bcm_enetsw_probe,
2843 	.remove	= bcm_enetsw_remove,
2844 	.driver	= {
2845 		.name	= "bcm63xx_enetsw",
2846 		.owner  = THIS_MODULE,
2847 	},
2848 };
2849 
2850 /* reserve & remap memory space shared between all macs */
2851 static int bcm_enet_shared_probe(struct platform_device *pdev)
2852 {
2853 	struct resource *res;
2854 	void __iomem *p[3];
2855 	unsigned int i;
2856 
2857 	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2858 
2859 	for (i = 0; i < 3; i++) {
2860 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
2861 		p[i] = devm_ioremap_resource(&pdev->dev, res);
2862 		if (IS_ERR(p[i]))
2863 			return PTR_ERR(p[i]);
2864 	}
2865 
2866 	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
2867 
2868 	return 0;
2869 }
2870 
2871 static int bcm_enet_shared_remove(struct platform_device *pdev)
2872 {
2873 	return 0;
2874 }
2875 
2876 /* this "shared" driver is needed because both macs share a single
2877  * address space
2878  */
2879 struct platform_driver bcm63xx_enet_shared_driver = {
2880 	.probe	= bcm_enet_shared_probe,
2881 	.remove	= bcm_enet_shared_remove,
2882 	.driver	= {
2883 		.name	= "bcm63xx_enet_shared",
2884 		.owner  = THIS_MODULE,
2885 	},
2886 };
2887 
2888 /* entry point */
2889 static int __init bcm_enet_init(void)
2890 {
2891 	int ret;
2892 
2893 	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
2894 	if (ret)
2895 		return ret;
2896 
2897 	ret = platform_driver_register(&bcm63xx_enet_driver);
2898 	if (ret)
2899 		platform_driver_unregister(&bcm63xx_enet_shared_driver);
2900 
2901 	ret = platform_driver_register(&bcm63xx_enetsw_driver);
2902 	if (ret) {
2903 		platform_driver_unregister(&bcm63xx_enet_driver);
2904 		platform_driver_unregister(&bcm63xx_enet_shared_driver);
2905 	}
2906 
2907 	return ret;
2908 }
2909 
2910 static void __exit bcm_enet_exit(void)
2911 {
2912 	platform_driver_unregister(&bcm63xx_enet_driver);
2913 	platform_driver_unregister(&bcm63xx_enetsw_driver);
2914 	platform_driver_unregister(&bcm63xx_enet_shared_driver);
2915 }
2916 
2917 
2918 module_init(bcm_enet_init);
2919 module_exit(bcm_enet_exit);
2920 
2921 MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2922 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2923 MODULE_LICENSE("GPL");
2924