/*
 * Dave DNET Ethernet Controller driver
 *
 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include "dnet.h"

#undef DEBUG

/* function for reading internal MAC register */
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	u16 data_read;

	/* issue a read */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before reading the data */
	ndelay(500);

	/* read data read from the MAC register */
	data_read = dnet_readl(bp, MACREG_DATA);

	/* all done */
	return data_read;
}

/* function for writing internal MAC register */
static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* load data to write */
	dnet_writel(bp, val, MACREG_DATA);

	/* issue a write */
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before exiting */
	ndelay(500);
}

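/*
 * Program the current dev_addr into the three 16-bit MAC address
 * registers, two octets per register in network byte order.
 */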
static void __dnet_set_hwaddr(struct dnet *bp)
{
	u16 tmp;

	tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}

static void dnet_get_hwaddr(struct dnet *bp)
{
	u16 tmp;
	u8 addr[6];

	/*
	 * from MAC docs:
	 * "Note that the MAC address is stored in the registers in Hexadecimal
	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
	 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
	 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
	 * Mac_addr[15:0]), and 0x80 (octet 5) to address 0x0E (Low byte of
	 * Mac_addr[15:0])."
	 */
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
	*((__be16 *)addr) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);

	if (is_valid_ether_addr(addr))
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}

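/*
 * MDIO read: wait for any pending management transfer to finish,
 * issue a read command for (mii_id, regnum) and return the value
 * latched in the management data register.
 */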
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct dnet *bp = bus->priv;
	u16 value;

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* prepare reg_value for a read */
	value = (mii_id << 8);
	value |= regnum;

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);

	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);

	return value;
}

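/*
 * MDIO write: load the data register first, then issue the write
 * command (bit 13 of the control word) and wait until the
 * management interface reports completion.
 */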
static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 tmp;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* prepare for a write operation */
	tmp = (1 << 13);

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* only 16 bits on data */
	value &= 0xffff;

	/* prepare reg_value for a write */
	tmp |= (mii_id << 8);
	tmp |= regnum;

	/* write data to write first */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}

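/*
 * phylib link-change callback: mirror the PHY's duplex, speed and
 * link state into the MAC mode/control registers and log any change.
 */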
static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;

			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack!  Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}

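/*
 * Find the first PHY on the MDIO bus, connect it with the interface
 * mode matching the MAC capabilities (RMII or MII) and mask the
 * advertised features with what the MAC supports.
 */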
static int dnet_mii_probe(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	/* find the first phy */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (bp->mii_bus->phy_map[phy_addr]) {
			phydev = bp->mii_bus->phy_map[phy_addr];
			break;
		}
	}

	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* TODO : add pin_irq */

	/* attach the mac to the phy */
	if (bp->capabilities & DNET_HAS_RMII) {
		phydev = phy_connect(dev, dev_name(&phydev->dev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_RMII);
	} else {
		phydev = phy_connect(dev, dev_name(&phydev->dev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_MII);
	}

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (bp->capabilities & DNET_HAS_GIGABIT)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}

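/*
 * Allocate and register the MDIO bus (all PHYs polled) and probe for
 * an attached PHY.
 */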
static int dnet_mii_init(struct dnet *bp)
{
	int err, i;

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL)
		return -ENOMEM;

	bp->mii_bus->name = "dnet_mii_bus";
	bp->mii_bus->read = &dnet_mdio_read;
	bp->mii_bus->write = &dnet_mdio_write;

	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		bp->pdev->name, bp->pdev->id);

	bp->mii_bus->priv = bp;

	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		bp->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(bp->mii_bus)) {
		err = -ENXIO;
		goto err_out_free_mdio_irq;
	}

	if (dnet_mii_probe(bp->dev) != 0) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out:
	mdiobus_free(bp->mii_bus);
	return err;
}

/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	return phy_write(phydev, 0x18, 0x4148);
}

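/*
 * Accumulate the hardware RX/TX statistics counters into hw_stats.
 * The fields of struct dnet_stats are assumed to mirror the counter
 * register layout; the WARN_ON()s below sanity-check that assumption.
 */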
static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);
}

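/*
 * NAPI poll: drain up to 'budget' frames from the RX FIFOs, copying
 * each frame word by word from the data FIFO into a freshly
 * allocated skb. If the FIFO empties before the budget is used up,
 * polling is completed and the RX command FIFO interrupt is
 * re-enabled.
 */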
static int dnet_poll(struct napi_struct *napi, int budget)
{
	struct dnet *bp = container_of(napi, struct dnet, napi);
	struct net_device *dev = bp->dev;
	int npackets = 0;
	unsigned int pkt_len;
	struct sk_buff *skb;
	unsigned int *data_ptr;
	u32 int_enable;
	u32 cmd_word;
	int i;

	while (npackets < budget) {
		/*
		 * break out of while loop if there are no more
		 * packets waiting
		 */
		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
			break;

		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
		pkt_len = cmd_word & 0xFFFF;

		if (cmd_word & 0xDF180000)
			printk(KERN_ERR "%s packet receive error %x\n",
			       __func__, cmd_word);

		skb = netdev_alloc_skb(dev, pkt_len + 5);
		if (skb != NULL) {
			/* Align IP on 16 byte boundaries */
			skb_reserve(skb, 2);
			/*
			 * 'skb_put()' points to the start of sk_buff
			 * data area.
			 */
			data_ptr = (unsigned int *)skb_put(skb, pkt_len);
			for (i = 0; i < (pkt_len + 3) >> 2; i++)
				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			npackets++;
		} else
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of "
			       "size %u.\n", dev->name, pkt_len);
	}

	if (npackets < budget) {
		/* We processed all packets available.  Tell NAPI it can
		 * stop polling then re-enable rx interrupts */
		napi_complete(napi);
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
		dnet_writel(bp, int_enable, INTR_ENB);
	}

	/* if the whole budget was used, NAPI will poll us again */
	return npackets;
}

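/*
 * Interrupt handler: wake the TX queue once the TX FIFO has drained,
 * flush the FIFOs on RX/TX FIFO errors, and hand RX work over to
 * NAPI with the RX command FIFO interrupt masked.
 */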
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}

#ifdef DEBUG
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb)	do {} while (0)
#endif

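/*
 * Transmit by programmed I/O: the frame is copied word by word into
 * the TX data FIFO and its command/length word is pushed into the TX
 * length FIFO. If the FIFO fill level rises above the almost-full
 * threshold, the queue is stopped until the almost-empty interrupt
 * fires.
 */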
static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	u32 tx_status, irq_enable;
	unsigned int len, i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;

	tx_status = dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
	       skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	/* frame size (words) */
	len = (skb->len + 3) >> 2;

	spin_lock_irqsave(&bp->lock, flags);

	tx_status = dnet_readl(bp, TX_STATUS);

	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room for the current frame */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform MAC that a packet's written and ready to be
		 * shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		netif_stop_queue(dev);
		tx_status = dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	/* free the buffer */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

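/*
 * Put the MAC into a quiescent state: disable RX/TX, program the
 * FIFO thresholds and flush both FIFOs.
 */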
static void dnet_reset_hw(struct dnet *bp)
{
	/* put ts_mac in IDLE state i.e. disable rx/tx */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/*
	 * RX FIFO almost full threshold: only cmd FIFO almost full is
	 * implemented for RX side
	 */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
	/*
	 * TX FIFO almost empty threshold: only data FIFO almost empty
	 * is implemented for TX side
	 */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush rx/tx fifos */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
			SYS_CTL);
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}

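/*
 * Bring the MAC up: reset it, program the station address, derive
 * the RX/TX control options from the netdev flags and enable the
 * interrupt sources used by the driver.
 */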
static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
			DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
			DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
			DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
			DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}

static int dnet_open(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	/* if the phy is not yet registered, retry later */
	if (!bp->phy_dev)
		return -EAGAIN;

	napi_enable(&bp->napi);
	dnet_init_hw(bp);

	phy_start_aneg(bp->phy_dev);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}

static int dnet_close(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	dnet_reset_hw(bp);
	netif_carrier_off(dev);

	return 0;
}

static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}

static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	dnet_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* ignore IPG violation error
			    hwstat->rx_ipg_viol + */
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}

static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static void dnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}

static const struct ethtool_ops dnet_ethtool_ops = {
	.get_settings		= dnet_get_settings,
	.set_settings		= dnet_set_settings,
	.get_drvinfo		= dnet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
};

static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open		= dnet_open,
	.ndo_stop		= dnet_close,
	.ndo_get_stats		= dnet_get_stats,
	.ndo_start_xmit		= dnet_start_xmit,
	.ndo_do_ioctl		= dnet_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

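/*
 * Platform probe: map the register window, hook up the interrupt,
 * read (or randomize) the MAC address, register the netdev and
 * finally bring up the MDIO bus and PHY.
 */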
static int dnet_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *dev;
	struct dnet *bp;
	struct phy_device *phydev;
	int err = -ENXIO;
	unsigned int mem_base, mem_size, irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no mmio resource defined\n");
		goto err_out;
	}
	mem_base = res->start;
	mem_size = resource_size(res);
	irq = platform_get_irq(pdev, 0);

	if (!request_mem_region(mem_base, mem_size, DRV_NAME)) {
		dev_err(&pdev->dev, "no memory region available\n");
		err = -EBUSY;
		goto err_out;
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		goto err_out_release_mem;

	/* TODO: Actually, we have some interesting features... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->dev = dev;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(mem_base, mem_size);
	if (!bp->regs) {
		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	dev->irq = irq;
	err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
		       irq, err);
		goto err_out_iounmap;
	}

	dev->netdev_ops = &dnet_netdev_ops;
	netif_napi_add(dev, &bp->napi, dnet_poll, 64);
	dev->ethtool_ops = &dnet_ethtool_ops;

	dev->base_addr = (unsigned long)bp->regs;

	bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;

	dnet_get_hwaddr(bp);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* choose a random ethernet address */
		eth_hw_addr_random(dev);
		__dnet_set_hwaddr(bp);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	/* register the PHY board fixup (for Marvell 88E1111) */
	err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
					 dnet_phy_marvell_fixup);
	/* we can live without it, so just issue a warning */
	if (err)
		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");

	err = dnet_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
	       bp->regs, mem_base, dev->irq, dev->dev_addr);
	dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
	       (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
	       (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
	       (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
	       (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
	phydev = bp->phy_dev;
	dev_info(&pdev->dev, "attached PHY driver [%s] "
	       "(mii_bus:phy_addr=%s, irq=%d)\n",
	       phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_iounmap:
	iounmap(bp->regs);
err_out_free_dev:
	free_netdev(dev);
err_out_release_mem:
	release_mem_region(mem_base, mem_size);
err_out:
	return err;
}

static int dnet_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		free_netdev(dev);
	}

	return 0;
}

static struct platform_driver dnet_driver = {
	.probe		= dnet_probe,
	.remove		= dnet_remove,
	.driver		= {
		.name		= "dnet",
	},
};

module_platform_driver(dnet_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");