1 /*
2  *      Davicom DM9000 Fast Ethernet driver for Linux.
3  * 	Copyright (C) 1997  Sten Wang
4  *
5  * 	This program is free software; you can redistribute it and/or
6  * 	modify it under the terms of the GNU General Public License
7  * 	as published by the Free Software Foundation; either version 2
8  * 	of the License, or (at your option) any later version.
9  *
10  * 	This program is distributed in the hope that it will be useful,
11  * 	but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * 	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * 	GNU General Public License for more details.
14  *
15  * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
16  *
17  * Additional updates, Copyright:
18  *	Ben Dooks <ben@simtec.co.uk>
19  *	Sascha Hauer <s.hauer@pengutronix.de>
20  */
21 
22 #include <linux/module.h>
23 #include <linux/ioport.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/interrupt.h>
27 #include <linux/skbuff.h>
28 #include <linux/spinlock.h>
29 #include <linux/crc32.h>
30 #include <linux/mii.h>
31 #include <linux/of.h>
32 #include <linux/of_net.h>
33 #include <linux/ethtool.h>
34 #include <linux/dm9000.h>
35 #include <linux/delay.h>
36 #include <linux/platform_device.h>
37 #include <linux/irq.h>
38 #include <linux/slab.h>
39 #include <linux/regulator/consumer.h>
40 #include <linux/gpio.h>
41 #include <linux/of_gpio.h>
42 
43 #include <asm/delay.h>
44 #include <asm/irq.h>
45 #include <asm/io.h>
46 
47 #include "dm9000.h"
48 
49 /* Board/System/Debug information/definition ---------------- */
50 
51 #define DM9000_PHY		0x40	/* PHY address 0x01 */
52 
53 #define CARDNAME	"dm9000"
54 #define DRV_VERSION	"1.31"
55 
56 /*
57  * Transmit timeout, default 5 seconds.
58  */
59 static int watchdog = 5000;
60 module_param(watchdog, int, 0400);
61 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
62 
63 /*
64  * Debug messages level
65  */
66 static int debug;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");
69 
70 /* DM9000 register address locking.
71  *
72  * The DM9000 uses an address register to control where data written
73  * to the data register goes. This means that the address register
74  * must be preserved over interrupts or similar calls.
75  *
76  * During interrupt and other critical calls, a spinlock is used to
77  * protect the system, but the calls themselves save the address
78  * in the address register in case they are interrupting another
79  * access to the device.
80  *
81  * For general accesses a lock is provided so that calls which are
82  * allowed to sleep are serialised so that the address register does
83  * not need to be saved. This lock also serves to serialise access
84  * to the EEPROM and PHY access registers which are shared between
85  * these two devices.
86  */
87 
88 /* The driver supports the original DM9000E, and now the two newer
89  * devices, DM9000A and DM9000B.
90  */
91 
enum dm9000_type {
	TYPE_DM9000E,	/* original DM9000 */
	TYPE_DM9000A,	/* newer variant with link-change interrupt */
	TYPE_DM9000B	/* newer variant; needs manual PHY reset at init */
};
97 
/* Structure/enum declaration ------------------------------- */
struct board_info {

	void __iomem	*io_addr;	/* Register I/O base address */
	void __iomem	*io_data;	/* Data I/O address */
	u16		 irq;		/* IRQ */

	u16		tx_pkt_cnt;	/* packets currently in TX SRAM (0-2) */
	u16		queue_pkt_len;	/* length of the queued second packet */
	u16		queue_start_addr;
	u16		queue_ip_summed; /* ip_summed of the queued packet */
	u16		dbug_cnt;
	u8		io_mode;		/* 0:word, 2:byte */
	u8		phy_addr;
	u8		imr_all;	/* IMR value with all wanted irqs enabled */

	unsigned int	flags;		/* DM9000_PLATF_* platform flags */
	unsigned int	in_timeout:1;	/* inside dm9000_timeout(); skip mutex */
	unsigned int	in_suspend:1;	/* suspending; must not sleep */
	unsigned int	wake_supported:1; /* wake-on-LAN irq available */

	enum dm9000_type type;

	/* bus-width specific block transfer routines, set by dm9000_set_io() */
	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;	     /* parent device */

	struct resource	*addr_res;   /* resources found */
	struct resource *data_res;
	struct resource	*addr_req;   /* resources requested */
	struct resource *data_req;
	struct resource *irq_res;

	int		 irq_wake;	/* irq used for wake-on-LAN */

	struct mutex	 addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;	/* periodic link poll (DM9000E) */
	struct net_device  *ndev;

	spinlock_t	lock;		/* protects chip register access */

	struct mii_if_info mii;
	u32		msg_enable;	/* netif message level bitmask */
	u32		wake_state;	/* currently configured WAKE_* opts */

	int		ip_summed;	/* last checksum mode written to TCCR */
};
148 
149 /* debug code */
150 
/* Emit a debug message when the module's "debug" level exceeds @lev. */
#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < debug) {				\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)
156 
/* Fetch the driver private data embedded in a net_device. */
static inline struct board_info *to_dm9000_board(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);

	return db;
}
161 
162 /* DM9000 network board routine ---------------------------- */
163 
164 /*
165  *   Read a byte from I/O port
166  */
167 static u8
168 ior(struct board_info *db, int reg)
169 {
170 	writeb(reg, db->io_addr);
171 	return readb(db->io_data);
172 }
173 
174 /*
175  *   Write a byte to I/O port
176  */
177 
178 static void
179 iow(struct board_info *db, int reg, int value)
180 {
181 	writeb(reg, db->io_addr);
182 	writeb(value, db->io_data);
183 }
184 
/* Perform the chip's mandated double software reset.
 *
 * NCR bit 0 (NCR_RST) self-clears when the reset completes, hence the
 * "& 1" checks after each delay.
 */
static void
dm9000_reset(struct board_info *db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
	 * The essential point is that we have to do a double reset, and the
	 * instruction is to set LBK into MAC internal loopback mode.
	 */
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100); /* Application note says at least 20 us */
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to first reset\n");

	iow(db, DM9000_NCR, 0);
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to second reset\n");
}
205 
206 /* routines for sending block to chip */
207 
/* Write @count bytes to the (pre-selected) data port, one byte at a time. */
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}
212 
/* Write @count bytes to the data port in 16-bit units (rounded up). */
static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count+1) >> 1);
}
217 
/* Write @count bytes to the data port in 32-bit units (rounded up). */
static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count+3) >> 2);
}
222 
223 /* input block from chip to memory */
224 
/* Read @count bytes from the (pre-selected) data port, one byte at a time. */
static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}
229 
230 
/* Read @count bytes from the data port in 16-bit units (rounded up). */
static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count+1) >> 1);
}
235 
/* Read @count bytes from the data port in 32-bit units (rounded up). */
static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count+3) >> 2);
}
240 
241 /* dump block from chip to null */
242 
243 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
244 {
245 	int i;
246 	int tmp;
247 
248 	for (i = 0; i < count; i++)
249 		tmp = readb(reg);
250 }
251 
252 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
253 {
254 	int i;
255 	int tmp;
256 
257 	count = (count + 1) >> 1;
258 
259 	for (i = 0; i < count; i++)
260 		tmp = readw(reg);
261 }
262 
263 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
264 {
265 	int i;
266 	int tmp;
267 
268 	count = (count + 3) >> 2;
269 
270 	for (i = 0; i < count; i++)
271 		tmp = readl(reg);
272 }
273 
274 /*
275  * Sleep, either by using msleep() or if we are suspending, then
276  * use mdelay() to sleep.
277  */
278 static void dm9000_msleep(struct board_info *db, unsigned int ms)
279 {
280 	if (db->in_suspend || db->in_timeout)
281 		mdelay(ms);
282 	else
283 		msleep(ms);
284 }
285 
/* Read a word from phyxcer (the PHY) via the chip's EPCR interface.
 *
 * May sleep: takes addr_lock to serialise against EEPROM/PHY users.
 * The chip address register is saved and restored around each locked
 * section so that an interrupted register access elsewhere survives.
 * @phy_reg_unused exists to satisfy the mii_if_info mdio_read prototype.
 */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Issue phyxcer read command */
	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait read complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */

	/* The read data keeps on REG_0D & REG_0E */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}
330 
/* Write a word to phyxcer (the PHY) via the chip's EPCR interface.
 *
 * Mirrors dm9000_phy_read(). addr_lock is skipped when called from
 * the TX-timeout path (in_timeout set) because that path already
 * runs under db->lock and must not sleep on the mutex.
 * @phyaddr_unused exists to satisfy the mii_if_info mdio_write prototype.
 */
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	if (!db->in_timeout)
		mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Fill the written data into REG_0D & REG_0E */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	/* Issue phyxcer write command */
	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait write complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */

	/* restore the previous address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	if (!db->in_timeout)
		mutex_unlock(&db->addr_lock);
}
376 
377 /* dm9000_set_io
378  *
379  * select the specified set of io routines to use with the
380  * device
381  */
382 
static void dm9000_set_io(struct board_info *db, int byte_width)
{
	/* use the size of the data resource to work out what IO
	 * routines we want to use
	 */

	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;


	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		/* fall through - 3-byte bus handled as 16-bit */
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}
413 
414 static void dm9000_schedule_poll(struct board_info *db)
415 {
416 	if (db->type == TYPE_DM9000E)
417 		schedule_delayed_work(&db->phy_poll, HZ * 2);
418 }
419 
420 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
421 {
422 	struct board_info *dm = to_dm9000_board(dev);
423 
424 	if (!netif_running(dev))
425 		return -EINVAL;
426 
427 	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
428 }
429 
430 static unsigned int
431 dm9000_read_locked(struct board_info *db, int reg)
432 {
433 	unsigned long flags;
434 	unsigned int ret;
435 
436 	spin_lock_irqsave(&db->lock, flags);
437 	ret = ior(db, reg);
438 	spin_unlock_irqrestore(&db->lock, flags);
439 
440 	return ret;
441 }
442 
443 static int dm9000_wait_eeprom(struct board_info *db)
444 {
445 	unsigned int status;
446 	int timeout = 8;	/* wait max 8msec */
447 
448 	/* The DM9000 data sheets say we should be able to
449 	 * poll the ERRE bit in EPCR to wait for the EEPROM
450 	 * operation. From testing several chips, this bit
451 	 * does not seem to work.
452 	 *
453 	 * We attempt to use the bit, but fall back to the
454 	 * timeout (which is why we do not return an error
455 	 * on expiry) to say that the EEPROM operation has
456 	 * completed.
457 	 */
458 
459 	while (1) {
460 		status = dm9000_read_locked(db, DM9000_EPCR);
461 
462 		if ((status & EPCR_ERRE) == 0)
463 			break;
464 
465 		msleep(1);
466 
467 		if (timeout-- < 0) {
468 			dev_dbg(db->dev, "timeout waiting EEPROM\n");
469 			break;
470 		}
471 	}
472 
473 	return 0;
474 }
475 
476 /*
477  *  Read a word data from EEPROM
478  */
479 static void
480 dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
481 {
482 	unsigned long flags;
483 
484 	if (db->flags & DM9000_PLATF_NO_EEPROM) {
485 		to[0] = 0xff;
486 		to[1] = 0xff;
487 		return;
488 	}
489 
490 	mutex_lock(&db->addr_lock);
491 
492 	spin_lock_irqsave(&db->lock, flags);
493 
494 	iow(db, DM9000_EPAR, offset);
495 	iow(db, DM9000_EPCR, EPCR_ERPRR);
496 
497 	spin_unlock_irqrestore(&db->lock, flags);
498 
499 	dm9000_wait_eeprom(db);
500 
501 	/* delay for at-least 150uS */
502 	msleep(1);
503 
504 	spin_lock_irqsave(&db->lock, flags);
505 
506 	iow(db, DM9000_EPCR, 0x0);
507 
508 	to[0] = ior(db, DM9000_EPDRL);
509 	to[1] = ior(db, DM9000_EPDRH);
510 
511 	spin_unlock_irqrestore(&db->lock, flags);
512 
513 	mutex_unlock(&db->addr_lock);
514 }
515 
516 /*
517  * Write a word data to SROM
518  */
519 static void
520 dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
521 {
522 	unsigned long flags;
523 
524 	if (db->flags & DM9000_PLATF_NO_EEPROM)
525 		return;
526 
527 	mutex_lock(&db->addr_lock);
528 
529 	spin_lock_irqsave(&db->lock, flags);
530 	iow(db, DM9000_EPAR, offset);
531 	iow(db, DM9000_EPDRH, data[1]);
532 	iow(db, DM9000_EPDRL, data[0]);
533 	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
534 	spin_unlock_irqrestore(&db->lock, flags);
535 
536 	dm9000_wait_eeprom(db);
537 
538 	mdelay(1);	/* wait at least 150uS to clear */
539 
540 	spin_lock_irqsave(&db->lock, flags);
541 	iow(db, DM9000_EPCR, 0);
542 	spin_unlock_irqrestore(&db->lock, flags);
543 
544 	mutex_unlock(&db->addr_lock);
545 }
546 
547 /* ethtool ops */
548 
549 static void dm9000_get_drvinfo(struct net_device *dev,
550 			       struct ethtool_drvinfo *info)
551 {
552 	struct board_info *dm = to_dm9000_board(dev);
553 
554 	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
555 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
556 	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
557 		sizeof(info->bus_info));
558 }
559 
560 static u32 dm9000_get_msglevel(struct net_device *dev)
561 {
562 	struct board_info *dm = to_dm9000_board(dev);
563 
564 	return dm->msg_enable;
565 }
566 
567 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
568 {
569 	struct board_info *dm = to_dm9000_board(dev);
570 
571 	dm->msg_enable = value;
572 }
573 
574 static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
575 {
576 	struct board_info *dm = to_dm9000_board(dev);
577 
578 	mii_ethtool_gset(&dm->mii, cmd);
579 	return 0;
580 }
581 
582 static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
583 {
584 	struct board_info *dm = to_dm9000_board(dev);
585 
586 	return mii_ethtool_sset(&dm->mii, cmd);
587 }
588 
589 static int dm9000_nway_reset(struct net_device *dev)
590 {
591 	struct board_info *dm = to_dm9000_board(dev);
592 	return mii_nway_restart(&dm->mii);
593 }
594 
595 static int dm9000_set_features(struct net_device *dev,
596 	netdev_features_t features)
597 {
598 	struct board_info *dm = to_dm9000_board(dev);
599 	netdev_features_t changed = dev->features ^ features;
600 	unsigned long flags;
601 
602 	if (!(changed & NETIF_F_RXCSUM))
603 		return 0;
604 
605 	spin_lock_irqsave(&dm->lock, flags);
606 	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
607 	spin_unlock_irqrestore(&dm->lock, flags);
608 
609 	return 0;
610 }
611 
612 static u32 dm9000_get_link(struct net_device *dev)
613 {
614 	struct board_info *dm = to_dm9000_board(dev);
615 	u32 ret;
616 
617 	if (dm->flags & DM9000_PLATF_EXT_PHY)
618 		ret = mii_link_ok(&dm->mii);
619 	else
620 		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
621 
622 	return ret;
623 }
624 
625 #define DM_EEPROM_MAGIC		(0x444D394B)
626 
/* ethtool: EEPROM size in bytes (64 x 16-bit words). */
static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}
631 
632 static int dm9000_get_eeprom(struct net_device *dev,
633 			     struct ethtool_eeprom *ee, u8 *data)
634 {
635 	struct board_info *dm = to_dm9000_board(dev);
636 	int offset = ee->offset;
637 	int len = ee->len;
638 	int i;
639 
640 	/* EEPROM access is aligned to two bytes */
641 
642 	if ((len & 1) != 0 || (offset & 1) != 0)
643 		return -EINVAL;
644 
645 	if (dm->flags & DM9000_PLATF_NO_EEPROM)
646 		return -ENOENT;
647 
648 	ee->magic = DM_EEPROM_MAGIC;
649 
650 	for (i = 0; i < len; i += 2)
651 		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
652 
653 	return 0;
654 }
655 
/* ethtool: write a range of the EEPROM from @data.
 *
 * Unlike reads, unaligned offsets/lengths are allowed: an odd byte is
 * handled by reading the containing word, patching one byte, and
 * writing it back ("done" tracks how many bytes were consumed per
 * iteration: 1 for the read-modify-write case, 2 for a full word).
 */
static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	/* EEPROM access is aligned to two bytes */

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	while (len > 0) {
		if (len & 1 || offset & 1) {
			int which = offset & 1;
			u8 tmp[2];

			/* partial word: read-modify-write one byte */
			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}
694 
695 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
696 {
697 	struct board_info *dm = to_dm9000_board(dev);
698 
699 	memset(w, 0, sizeof(struct ethtool_wolinfo));
700 
701 	/* note, we could probably support wake-phy too */
702 	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
703 	w->wolopts = dm->wake_state;
704 }
705 
/* ethtool: configure wake-on-LAN.
 *
 * Only WAKE_MAGIC is supported. Programs the chip's WCR wake mask
 * and arms/disarms the wake IRQ only on an off->on or on->off
 * transition of the stored wake state.
 */
static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* change in wol state, update IRQ state */

		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}
742 
/* ethtool operations table, wired up at probe time. */
static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_settings		= dm9000_get_settings,
	.set_settings		= dm9000_set_settings,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
};
757 
758 static void dm9000_show_carrier(struct board_info *db,
759 				unsigned carrier, unsigned nsr)
760 {
761 	int lpa;
762 	struct net_device *ndev = db->ndev;
763 	struct mii_if_info *mii = &db->mii;
764 	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
765 
766 	if (carrier) {
767 		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
768 		dev_info(db->dev,
769 			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
770 			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
771 			 (ncr & NCR_FDX) ? "full" : "half", lpa);
772 	} else {
773 		dev_info(db->dev, "%s: link down\n", ndev->name);
774 	}
775 }
776 
/* Delayed-work handler: poll link state.
 *
 * Platforms with a "simple" internal PHY read NSR_LINKST directly;
 * everything else defers to the generic MII media check. Re-arms
 * itself via dm9000_schedule_poll() while the interface is up.
 */
static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct board_info *db = container_of(dw, struct board_info, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}
807 
808 /* dm9000_release_board
809  *
810  * release a board, and any mapped resources
811  */
812 
813 static void
814 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
815 {
816 	/* unmap our resources */
817 
818 	iounmap(db->io_addr);
819 	iounmap(db->io_data);
820 
821 	/* release the resources */
822 
823 	if (db->data_req)
824 		release_resource(db->data_req);
825 	kfree(db->data_req);
826 
827 	if (db->addr_req)
828 		release_resource(db->addr_req);
829 	kfree(db->addr_req);
830 }
831 
832 static unsigned char dm9000_type_to_char(enum dm9000_type type)
833 {
834 	switch (type) {
835 	case TYPE_DM9000E: return 'e';
836 	case TYPE_DM9000A: return 'a';
837 	case TYPE_DM9000B: return 'b';
838 	}
839 
840 	return '?';
841 }
842 
843 /*
844  *  Set DM9000 multicast address
845  */
846 static void
847 dm9000_hash_table_unlocked(struct net_device *dev)
848 {
849 	struct board_info *db = netdev_priv(dev);
850 	struct netdev_hw_addr *ha;
851 	int i, oft;
852 	u32 hash_val;
853 	u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
854 	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
855 
856 	dm9000_dbg(db, 1, "entering %s\n", __func__);
857 
858 	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
859 		iow(db, oft, dev->dev_addr[i]);
860 
861 	if (dev->flags & IFF_PROMISC)
862 		rcr |= RCR_PRMSC;
863 
864 	if (dev->flags & IFF_ALLMULTI)
865 		rcr |= RCR_ALL;
866 
867 	/* the multicast address in Hash Table : 64 bits */
868 	netdev_for_each_mc_addr(ha, dev) {
869 		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
870 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
871 	}
872 
873 	/* Write the hash table to MAC MD table */
874 	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
875 		iow(db, oft++, hash_table[i]);
876 		iow(db, oft++, hash_table[i] >> 8);
877 	}
878 
879 	iow(db, DM9000_RCR, rcr);
880 }
881 
/* ndo_set_rx_mode handler: locked wrapper around the filter update. */
static void
dm9000_hash_table(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}
892 
/* Disable all chip interrupt sources (IMR_PAR alone keeps the RX
 * pointer auto-return enabled).
 */
static void
dm9000_mask_interrupts(struct board_info *db)
{
	iow(db, DM9000_IMR, IMR_PAR);
}
898 
/* Re-enable the interrupt sources selected in dm9000_init_dm9000(). */
static void
dm9000_unmask_interrupts(struct board_info *db)
{
	iow(db, DM9000_IMR, db->imr_all);
}
904 
905 /*
906  * Initialize dm9000 board
907  */
908 static void
909 dm9000_init_dm9000(struct net_device *dev)
910 {
911 	struct board_info *db = netdev_priv(dev);
912 	unsigned int imr;
913 	unsigned int ncr;
914 
915 	dm9000_dbg(db, 1, "entering %s\n", __func__);
916 
917 	dm9000_reset(db);
918 	dm9000_mask_interrupts(db);
919 
920 	/* I/O mode */
921 	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */
922 
923 	/* Checksum mode */
924 	if (dev->hw_features & NETIF_F_RXCSUM)
925 		iow(db, DM9000_RCSR,
926 			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
927 
928 	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
929 	iow(db, DM9000_GPR, 0);
930 
931 	/* If we are dealing with DM9000B, some extra steps are required: a
932 	 * manual phy reset, and setting init params.
933 	 */
934 	if (db->type == TYPE_DM9000B) {
935 		dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
936 		dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
937 	}
938 
939 	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
940 
941 	/* if wol is needed, then always set NCR_WAKEEN otherwise we end
942 	 * up dumping the wake events if we disable this. There is already
943 	 * a wake-mask in DM9000_WCR */
944 	if (db->wake_supported)
945 		ncr |= NCR_WAKEEN;
946 
947 	iow(db, DM9000_NCR, ncr);
948 
949 	/* Program operating register */
950 	iow(db, DM9000_TCR, 0);	        /* TX Polling clear */
951 	iow(db, DM9000_BPTR, 0x3f);	/* Less 3Kb, 200us */
952 	iow(db, DM9000_FCR, 0xff);	/* Flow Control */
953 	iow(db, DM9000_SMCR, 0);        /* Special Mode */
954 	/* clear TX status */
955 	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
956 	iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
957 
958 	/* Set address filter table */
959 	dm9000_hash_table_unlocked(dev);
960 
961 	imr = IMR_PAR | IMR_PTM | IMR_PRM;
962 	if (db->type != TYPE_DM9000E)
963 		imr |= IMR_LNKCHNG;
964 
965 	db->imr_all = imr;
966 
967 	/* Init Driver variable */
968 	db->tx_pkt_cnt = 0;
969 	db->queue_pkt_len = 0;
970 	dev->trans_start = jiffies;
971 }
972 
/* Our watchdog timed out. Called by the networking layer
 *
 * Reinitialises the whole chip under db->lock. in_timeout is set so
 * that the PHY helpers called from dm9000_init_dm9000() skip the
 * (sleeping) addr_lock and use mdelay instead of msleep.
 */
static void dm9000_timeout(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* Save previous register address */
	spin_lock_irqsave(&db->lock, flags);
	db->in_timeout = 1;
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_init_dm9000(dev);
	dm9000_unmask_interrupts(db);
	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);
	db->in_timeout = 0;
	spin_unlock_irqrestore(&db->lock, flags);
}
997 
/* Kick off transmission of a packet already copied into TX SRAM.
 *
 * Updates the TX checksum-offload mode (TCCR) only when it differs
 * from the cached value, writes the packet length, and issues the
 * TX-poll command. Caller holds db->lock.
 */
static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	struct board_info *dm = to_dm9000_board(dev);

	/* The DM9000 is not smart enough to leave fragmented packets alone. */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* Set TX length to DM9000 */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* Issue TX polling command */
	iow(dm, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
}
1020 
1021 /*
1022  *  Hardware start transmission.
1023  *  Send a packet to media from the upper layer.
1024  */
1025 static int
1026 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1027 {
1028 	unsigned long flags;
1029 	struct board_info *db = netdev_priv(dev);
1030 
1031 	dm9000_dbg(db, 3, "%s:\n", __func__);
1032 
1033 	if (db->tx_pkt_cnt > 1)
1034 		return NETDEV_TX_BUSY;
1035 
1036 	spin_lock_irqsave(&db->lock, flags);
1037 
1038 	/* Move data to DM9000 TX RAM */
1039 	writeb(DM9000_MWCMD, db->io_addr);
1040 
1041 	(db->outblk)(db->io_data, skb->data, skb->len);
1042 	dev->stats.tx_bytes += skb->len;
1043 
1044 	db->tx_pkt_cnt++;
1045 	/* TX control: First packet immediately send, second packet queue */
1046 	if (db->tx_pkt_cnt == 1) {
1047 		dm9000_send_packet(dev, skb->ip_summed, skb->len);
1048 	} else {
1049 		/* Second packet */
1050 		db->queue_pkt_len = skb->len;
1051 		db->queue_ip_summed = skb->ip_summed;
1052 		netif_stop_queue(dev);
1053 	}
1054 
1055 	spin_unlock_irqrestore(&db->lock, flags);
1056 
1057 	/* free this SKB */
1058 	dev_consume_skb_any(skb);
1059 
1060 	return NETDEV_TX_OK;
1061 }
1062 
1063 /*
1064  * DM9000 interrupt handler
1065  * receive the packet to upper layer, free the transmitted packet
1066  */
1067 
/* TX-complete handling, called from the interrupt handler with
 * db->lock held: account the finished packet, launch the queued
 * second packet if any, and restart the netif queue.
 */
static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
{
	int tx_status = ior(db, DM9000_NSR);	/* Got TX status */

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* One packet sent complete */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* Queue packet check & send */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}
1087 
/* 4-byte header the chip prepends to each packet in RX SRAM. */
struct dm9000_rxhdr {
	u8	RxPktReady;	/* ready flag (DM9000_PKT_RDY) */
	u8	RxStatus;	/* mirrors the RSR register */
	__le16	RxLen;		/* packet length including CRC */
} __packed;
1093 
1094 /*
1095  *  Received a packet and pass to upper layer
1096  */
1097 static void
1098 dm9000_rx(struct net_device *dev)
1099 {
1100 	struct board_info *db = netdev_priv(dev);
1101 	struct dm9000_rxhdr rxhdr;
1102 	struct sk_buff *skb;
1103 	u8 rxbyte, *rdptr;
1104 	bool GoodPacket;
1105 	int RxLen;
1106 
1107 	/* Check packet ready or not */
1108 	do {
1109 		ior(db, DM9000_MRCMDX);	/* Dummy read */
1110 
1111 		/* Get most updated data */
1112 		rxbyte = readb(db->io_data);
1113 
1114 		/* Status check: this byte must be 0 or 1 */
1115 		if (rxbyte & DM9000_PKT_ERR) {
1116 			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1117 			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
1118 			return;
1119 		}
1120 
1121 		if (!(rxbyte & DM9000_PKT_RDY))
1122 			return;
1123 
1124 		/* A packet ready now  & Get status/length */
1125 		GoodPacket = true;
1126 		writeb(DM9000_MRCMD, db->io_addr);
1127 
1128 		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
1129 
1130 		RxLen = le16_to_cpu(rxhdr.RxLen);
1131 
1132 		if (netif_msg_rx_status(db))
1133 			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1134 				rxhdr.RxStatus, RxLen);
1135 
1136 		/* Packet Status check */
1137 		if (RxLen < 0x40) {
1138 			GoodPacket = false;
1139 			if (netif_msg_rx_err(db))
1140 				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1141 		}
1142 
1143 		if (RxLen > DM9000_PKT_MAX) {
1144 			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1145 		}
1146 
1147 		/* rxhdr.RxStatus is identical to RSR register. */
1148 		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1149 				      RSR_PLE | RSR_RWTO |
1150 				      RSR_LCS | RSR_RF)) {
1151 			GoodPacket = false;
1152 			if (rxhdr.RxStatus & RSR_FOE) {
1153 				if (netif_msg_rx_err(db))
1154 					dev_dbg(db->dev, "fifo error\n");
1155 				dev->stats.rx_fifo_errors++;
1156 			}
1157 			if (rxhdr.RxStatus & RSR_CE) {
1158 				if (netif_msg_rx_err(db))
1159 					dev_dbg(db->dev, "crc error\n");
1160 				dev->stats.rx_crc_errors++;
1161 			}
1162 			if (rxhdr.RxStatus & RSR_RF) {
1163 				if (netif_msg_rx_err(db))
1164 					dev_dbg(db->dev, "length error\n");
1165 				dev->stats.rx_length_errors++;
1166 			}
1167 		}
1168 
1169 		/* Move data from DM9000 */
1170 		if (GoodPacket &&
1171 		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
1172 			skb_reserve(skb, 2);
1173 			rdptr = (u8 *) skb_put(skb, RxLen - 4);
1174 
1175 			/* Read received packet from RX SRAM */
1176 
1177 			(db->inblk)(db->io_data, rdptr, RxLen);
1178 			dev->stats.rx_bytes += RxLen;
1179 
1180 			/* Pass to upper layer */
1181 			skb->protocol = eth_type_trans(skb, dev);
1182 			if (dev->features & NETIF_F_RXCSUM) {
1183 				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1184 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1185 				else
1186 					skb_checksum_none_assert(skb);
1187 			}
1188 			netif_rx(skb);
1189 			dev->stats.rx_packets++;
1190 
1191 		} else {
1192 			/* need to dump the packet's data */
1193 
1194 			(db->dumpblk)(db->io_data, RxLen);
1195 		}
1196 	} while (rxbyte & DM9000_PKT_RDY);
1197 }
1198 
/* Main interrupt handler.
 *
 * The DM9000 uses an indirect register interface (an address port plus
 * a data port), so the currently-selected register index is saved on
 * entry and restored on exit in case this IRQ preempted another
 * register access on the same bus.  Chip interrupts are masked for the
 * duration of the handler and the latched status bits are dispatched
 * to the RX/TX paths.
 */
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;	/* register index latched in the address port */

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* A real interrupt coming */

	/* holders of db->lock must always block IRQs */
	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	dm9000_mask_interrupts(db);
	/* Got DM9000 interrupt status */
	int_status = ior(db, DM9000_ISR);	/* Got ISR */
	iow(db, DM9000_ISR, int_status);	/* Clear ISR status (write-1-to-clear) */

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* Received the coming packet */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* Transmit Interrupt check */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	/* Non-DM9000E parts report link changes through the ISR; defer
	 * the actual media check to the phy_poll workqueue.
	 */
	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change request */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	dm9000_unmask_interrupts(db);
	/* Restore previous register address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}
1248 
1249 static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1250 {
1251 	struct net_device *dev = dev_id;
1252 	struct board_info *db = netdev_priv(dev);
1253 	unsigned long flags;
1254 	unsigned nsr, wcr;
1255 
1256 	spin_lock_irqsave(&db->lock, flags);
1257 
1258 	nsr = ior(db, DM9000_NSR);
1259 	wcr = ior(db, DM9000_WCR);
1260 
1261 	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1262 
1263 	if (nsr & NSR_WAKEST) {
1264 		/* clear, so we can avoid */
1265 		iow(db, DM9000_NSR, NSR_WAKEST);
1266 
1267 		if (wcr & WCR_LINKST)
1268 			dev_info(db->dev, "wake by link status change\n");
1269 		if (wcr & WCR_SAMPLEST)
1270 			dev_info(db->dev, "wake by sample packet\n");
1271 		if (wcr & WCR_MAGICST)
1272 			dev_info(db->dev, "wake by magic packet\n");
1273 		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1274 			dev_err(db->dev, "wake signalled with no reason? "
1275 				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1276 	}
1277 
1278 	spin_unlock_irqrestore(&db->lock, flags);
1279 
1280 	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1281 }
1282 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll handler for netconsole: run the normal interrupt routine with
 * the device's IRQ line masked at the interrupt controller.
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	int irq = dev->irq;

	disable_irq(irq);
	dm9000_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
1294 
1295 /*
1296  *  Open the interface.
1297  *  The interface is opened whenever "ifconfig" actives it.
1298  */
1299 static int
1300 dm9000_open(struct net_device *dev)
1301 {
1302 	struct board_info *db = netdev_priv(dev);
1303 	unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1304 
1305 	if (netif_msg_ifup(db))
1306 		dev_dbg(db->dev, "enabling %s\n", dev->name);
1307 
1308 	/* If there is no IRQ type specified, default to something that
1309 	 * may work, and tell the user that this is a problem */
1310 
1311 	if (irqflags == IRQF_TRIGGER_NONE)
1312 		irqflags = irq_get_trigger_type(dev->irq);
1313 
1314 	if (irqflags == IRQF_TRIGGER_NONE)
1315 		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1316 
1317 	irqflags |= IRQF_SHARED;
1318 
1319 	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1320 	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
1321 	mdelay(1); /* delay needs by DM9000B */
1322 
1323 	/* Initialize DM9000 board */
1324 	dm9000_init_dm9000(dev);
1325 
1326 	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1327 		return -EAGAIN;
1328 	/* Now that we have an interrupt handler hooked up we can unmask
1329 	 * our interrupts
1330 	 */
1331 	dm9000_unmask_interrupts(db);
1332 
1333 	/* Init driver variable */
1334 	db->dbug_cnt = 0;
1335 
1336 	mii_check_media(&db->mii, netif_msg_link(db), 1);
1337 	netif_start_queue(dev);
1338 
1339 	/* Poll initial link status */
1340 	schedule_delayed_work(&db->phy_poll, 1);
1341 
1342 	return 0;
1343 }
1344 
/* Power the hardware down: reset the PHY, switch it off, mask chip
 * interrupts and disable the receiver.  Called with the interface
 * quiesced, from dm9000_stop() and from the suspend path.  The
 * statement order follows the hardware power-down sequence and must
 * not be rearranged.
 */
static void
dm9000_shutdown(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);

	/* RESET device */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
	dm9000_mask_interrupts(db);
	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
}
1356 
1357 /*
1358  * Stop the interface.
1359  * The interface is stopped when it is brought.
1360  */
1361 static int
1362 dm9000_stop(struct net_device *ndev)
1363 {
1364 	struct board_info *db = netdev_priv(ndev);
1365 
1366 	if (netif_msg_ifdown(db))
1367 		dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1368 
1369 	cancel_delayed_work_sync(&db->phy_poll);
1370 
1371 	netif_stop_queue(ndev);
1372 	netif_carrier_off(ndev);
1373 
1374 	/* free interrupt */
1375 	free_irq(ndev->irq, ndev);
1376 
1377 	dm9000_shutdown(ndev);
1378 
1379 	return 0;
1380 }
1381 
/* net_device operations wired up for every DM9000 instance; the
 * standard eth_* helpers are used where no chip-specific handling is
 * needed (MTU change, address validation, MAC address set).
 */
static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};
1397 
1398 static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1399 {
1400 	struct dm9000_plat_data *pdata;
1401 	struct device_node *np = dev->of_node;
1402 	const void *mac_addr;
1403 
1404 	if (!IS_ENABLED(CONFIG_OF) || !np)
1405 		return ERR_PTR(-ENXIO);
1406 
1407 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1408 	if (!pdata)
1409 		return ERR_PTR(-ENOMEM);
1410 
1411 	if (of_find_property(np, "davicom,ext-phy", NULL))
1412 		pdata->flags |= DM9000_PLATF_EXT_PHY;
1413 	if (of_find_property(np, "davicom,no-eeprom", NULL))
1414 		pdata->flags |= DM9000_PLATF_NO_EEPROM;
1415 
1416 	mac_addr = of_get_mac_address(np);
1417 	if (mac_addr)
1418 		memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
1419 
1420 	return pdata;
1421 }
1422 
1423 /*
1424  * Search DM9000 board, allocate space and register it
1425  */
1426 static int
1427 dm9000_probe(struct platform_device *pdev)
1428 {
1429 	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1430 	struct board_info *db;	/* Point a board information structure */
1431 	struct net_device *ndev;
1432 	struct device *dev = &pdev->dev;
1433 	const unsigned char *mac_src;
1434 	int ret = 0;
1435 	int iosize;
1436 	int i;
1437 	u32 id_val;
1438 	int reset_gpios;
1439 	enum of_gpio_flags flags;
1440 	struct regulator *power;
1441 
1442 	power = devm_regulator_get(dev, "vcc");
1443 	if (IS_ERR(power)) {
1444 		if (PTR_ERR(power) == -EPROBE_DEFER)
1445 			return -EPROBE_DEFER;
1446 		dev_dbg(dev, "no regulator provided\n");
1447 	} else {
1448 		ret = regulator_enable(power);
1449 		if (ret != 0) {
1450 			dev_err(dev,
1451 				"Failed to enable power regulator: %d\n", ret);
1452 			return ret;
1453 		}
1454 		dev_dbg(dev, "regulator enabled\n");
1455 	}
1456 
1457 	reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
1458 					      &flags);
1459 	if (gpio_is_valid(reset_gpios)) {
1460 		ret = devm_gpio_request_one(dev, reset_gpios, flags,
1461 					    "dm9000_reset");
1462 		if (ret) {
1463 			dev_err(dev, "failed to request reset gpio %d: %d\n",
1464 				reset_gpios, ret);
1465 			return -ENODEV;
1466 		}
1467 
1468 		/* According to manual PWRST# Low Period Min 1ms */
1469 		msleep(2);
1470 		gpio_set_value(reset_gpios, 1);
1471 		/* Needs 3ms to read eeprom when PWRST is deasserted */
1472 		msleep(4);
1473 	}
1474 
1475 	if (!pdata) {
1476 		pdata = dm9000_parse_dt(&pdev->dev);
1477 		if (IS_ERR(pdata))
1478 			return PTR_ERR(pdata);
1479 	}
1480 
1481 	/* Init network device */
1482 	ndev = alloc_etherdev(sizeof(struct board_info));
1483 	if (!ndev)
1484 		return -ENOMEM;
1485 
1486 	SET_NETDEV_DEV(ndev, &pdev->dev);
1487 
1488 	dev_dbg(&pdev->dev, "dm9000_probe()\n");
1489 
1490 	/* setup board info structure */
1491 	db = netdev_priv(ndev);
1492 
1493 	db->dev = &pdev->dev;
1494 	db->ndev = ndev;
1495 
1496 	spin_lock_init(&db->lock);
1497 	mutex_init(&db->addr_lock);
1498 
1499 	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1500 
1501 	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1503 	db->irq_res  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1504 
1505 	if (db->addr_res == NULL || db->data_res == NULL ||
1506 	    db->irq_res == NULL) {
1507 		dev_err(db->dev, "insufficient resources\n");
1508 		ret = -ENOENT;
1509 		goto out;
1510 	}
1511 
1512 	db->irq_wake = platform_get_irq(pdev, 1);
1513 	if (db->irq_wake >= 0) {
1514 		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1515 
1516 		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1517 				  IRQF_SHARED, dev_name(db->dev), ndev);
1518 		if (ret) {
1519 			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1520 		} else {
1521 
1522 			/* test to see if irq is really wakeup capable */
1523 			ret = irq_set_irq_wake(db->irq_wake, 1);
1524 			if (ret) {
1525 				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1526 					db->irq_wake, ret);
1527 				ret = 0;
1528 			} else {
1529 				irq_set_irq_wake(db->irq_wake, 0);
1530 				db->wake_supported = 1;
1531 			}
1532 		}
1533 	}
1534 
1535 	iosize = resource_size(db->addr_res);
1536 	db->addr_req = request_mem_region(db->addr_res->start, iosize,
1537 					  pdev->name);
1538 
1539 	if (db->addr_req == NULL) {
1540 		dev_err(db->dev, "cannot claim address reg area\n");
1541 		ret = -EIO;
1542 		goto out;
1543 	}
1544 
1545 	db->io_addr = ioremap(db->addr_res->start, iosize);
1546 
1547 	if (db->io_addr == NULL) {
1548 		dev_err(db->dev, "failed to ioremap address reg\n");
1549 		ret = -EINVAL;
1550 		goto out;
1551 	}
1552 
1553 	iosize = resource_size(db->data_res);
1554 	db->data_req = request_mem_region(db->data_res->start, iosize,
1555 					  pdev->name);
1556 
1557 	if (db->data_req == NULL) {
1558 		dev_err(db->dev, "cannot claim data reg area\n");
1559 		ret = -EIO;
1560 		goto out;
1561 	}
1562 
1563 	db->io_data = ioremap(db->data_res->start, iosize);
1564 
1565 	if (db->io_data == NULL) {
1566 		dev_err(db->dev, "failed to ioremap data reg\n");
1567 		ret = -EINVAL;
1568 		goto out;
1569 	}
1570 
1571 	/* fill in parameters for net-dev structure */
1572 	ndev->base_addr = (unsigned long)db->io_addr;
1573 	ndev->irq	= db->irq_res->start;
1574 
1575 	/* ensure at least we have a default set of IO routines */
1576 	dm9000_set_io(db, iosize);
1577 
1578 	/* check to see if anything is being over-ridden */
1579 	if (pdata != NULL) {
1580 		/* check to see if the driver wants to over-ride the
1581 		 * default IO width */
1582 
1583 		if (pdata->flags & DM9000_PLATF_8BITONLY)
1584 			dm9000_set_io(db, 1);
1585 
1586 		if (pdata->flags & DM9000_PLATF_16BITONLY)
1587 			dm9000_set_io(db, 2);
1588 
1589 		if (pdata->flags & DM9000_PLATF_32BITONLY)
1590 			dm9000_set_io(db, 4);
1591 
1592 		/* check to see if there are any IO routine
1593 		 * over-rides */
1594 
1595 		if (pdata->inblk != NULL)
1596 			db->inblk = pdata->inblk;
1597 
1598 		if (pdata->outblk != NULL)
1599 			db->outblk = pdata->outblk;
1600 
1601 		if (pdata->dumpblk != NULL)
1602 			db->dumpblk = pdata->dumpblk;
1603 
1604 		db->flags = pdata->flags;
1605 	}
1606 
1607 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1608 	db->flags |= DM9000_PLATF_SIMPLE_PHY;
1609 #endif
1610 
1611 	dm9000_reset(db);
1612 
1613 	/* try multiple times, DM9000 sometimes gets the read wrong */
1614 	for (i = 0; i < 8; i++) {
1615 		id_val  = ior(db, DM9000_VIDL);
1616 		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1617 		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1618 		id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1619 
1620 		if (id_val == DM9000_ID)
1621 			break;
1622 		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1623 	}
1624 
1625 	if (id_val != DM9000_ID) {
1626 		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1627 		ret = -ENODEV;
1628 		goto out;
1629 	}
1630 
1631 	/* Identify what type of DM9000 we are working on */
1632 
1633 	id_val = ior(db, DM9000_CHIPR);
1634 	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1635 
1636 	switch (id_val) {
1637 	case CHIPR_DM9000A:
1638 		db->type = TYPE_DM9000A;
1639 		break;
1640 	case CHIPR_DM9000B:
1641 		db->type = TYPE_DM9000B;
1642 		break;
1643 	default:
1644 		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1645 		db->type = TYPE_DM9000E;
1646 	}
1647 
1648 	/* dm9000a/b are capable of hardware checksum offload */
1649 	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1650 		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1651 		ndev->features |= ndev->hw_features;
1652 	}
1653 
1654 	/* from this point we assume that we have found a DM9000 */
1655 
1656 	ndev->netdev_ops	= &dm9000_netdev_ops;
1657 	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
1658 	ndev->ethtool_ops	= &dm9000_ethtool_ops;
1659 
1660 	db->msg_enable       = NETIF_MSG_LINK;
1661 	db->mii.phy_id_mask  = 0x1f;
1662 	db->mii.reg_num_mask = 0x1f;
1663 	db->mii.force_media  = 0;
1664 	db->mii.full_duplex  = 0;
1665 	db->mii.dev	     = ndev;
1666 	db->mii.mdio_read    = dm9000_phy_read;
1667 	db->mii.mdio_write   = dm9000_phy_write;
1668 
1669 	mac_src = "eeprom";
1670 
1671 	/* try reading the node address from the attached EEPROM */
1672 	for (i = 0; i < 6; i += 2)
1673 		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1674 
1675 	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1676 		mac_src = "platform data";
1677 		memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
1678 	}
1679 
1680 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1681 		/* try reading from mac */
1682 
1683 		mac_src = "chip";
1684 		for (i = 0; i < 6; i++)
1685 			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1686 	}
1687 
1688 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1689 		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1690 			 "set using ifconfig\n", ndev->name);
1691 
1692 		eth_hw_addr_random(ndev);
1693 		mac_src = "random";
1694 	}
1695 
1696 
1697 	platform_set_drvdata(pdev, ndev);
1698 	ret = register_netdev(ndev);
1699 
1700 	if (ret == 0)
1701 		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1702 		       ndev->name, dm9000_type_to_char(db->type),
1703 		       db->io_addr, db->io_data, ndev->irq,
1704 		       ndev->dev_addr, mac_src);
1705 	return 0;
1706 
1707 out:
1708 	dev_err(db->dev, "not found (%d).\n", ret);
1709 
1710 	dm9000_release_board(pdev, db);
1711 	free_netdev(ndev);
1712 
1713 	return ret;
1714 }
1715 
1716 static int
1717 dm9000_drv_suspend(struct device *dev)
1718 {
1719 	struct platform_device *pdev = to_platform_device(dev);
1720 	struct net_device *ndev = platform_get_drvdata(pdev);
1721 	struct board_info *db;
1722 
1723 	if (ndev) {
1724 		db = netdev_priv(ndev);
1725 		db->in_suspend = 1;
1726 
1727 		if (!netif_running(ndev))
1728 			return 0;
1729 
1730 		netif_device_detach(ndev);
1731 
1732 		/* only shutdown if not using WoL */
1733 		if (!db->wake_state)
1734 			dm9000_shutdown(ndev);
1735 	}
1736 	return 0;
1737 }
1738 
1739 static int
1740 dm9000_drv_resume(struct device *dev)
1741 {
1742 	struct platform_device *pdev = to_platform_device(dev);
1743 	struct net_device *ndev = platform_get_drvdata(pdev);
1744 	struct board_info *db = netdev_priv(ndev);
1745 
1746 	if (ndev) {
1747 		if (netif_running(ndev)) {
1748 			/* reset if we were not in wake mode to ensure if
1749 			 * the device was powered off it is in a known state */
1750 			if (!db->wake_state) {
1751 				dm9000_init_dm9000(ndev);
1752 				dm9000_unmask_interrupts(db);
1753 			}
1754 
1755 			netif_device_attach(ndev);
1756 		}
1757 
1758 		db->in_suspend = 0;
1759 	}
1760 	return 0;
1761 }
1762 
/* power-management hooks exposed via the platform driver below */
static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};
1767 
1768 static int
1769 dm9000_drv_remove(struct platform_device *pdev)
1770 {
1771 	struct net_device *ndev = platform_get_drvdata(pdev);
1772 
1773 	unregister_netdev(ndev);
1774 	dm9000_release_board(pdev, netdev_priv(ndev));
1775 	free_netdev(ndev);		/* free device structure */
1776 
1777 	dev_dbg(&pdev->dev, "released and freed device\n");
1778 	return 0;
1779 }
1780 
#ifdef CONFIG_OF
/* device-tree match table; exported so the module autoloads on
 * "davicom,dm9000" compatible nodes */
static const struct of_device_id dm9000_of_matches[] = {
	{ .compatible = "davicom,dm9000", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dm9000_of_matches);
#endif
1788 
/* platform driver glue; of_match_ptr() compiles the match table away
 * when CONFIG_OF is not set */
static struct platform_driver dm9000_driver = {
	.driver	= {
		.name    = "dm9000",
		.pm	 = &dm9000_drv_pm_ops,
		.of_match_table = of_match_ptr(dm9000_of_matches),
	},
	.probe   = dm9000_probe,
	.remove  = dm9000_drv_remove,
};
1798 
/* module boilerplate: register the platform driver and describe the module */
module_platform_driver(dm9000_driver);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");
1805