1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Davicom DM9000 Fast Ethernet driver for Linux. 4 * Copyright (C) 1997 Sten Wang 5 * 6 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. 7 * 8 * Additional updates, Copyright: 9 * Ben Dooks <ben@simtec.co.uk> 10 * Sascha Hauer <s.hauer@pengutronix.de> 11 */ 12 13 #include <linux/module.h> 14 #include <linux/ioport.h> 15 #include <linux/netdevice.h> 16 #include <linux/etherdevice.h> 17 #include <linux/interrupt.h> 18 #include <linux/skbuff.h> 19 #include <linux/spinlock.h> 20 #include <linux/crc32.h> 21 #include <linux/mii.h> 22 #include <linux/of.h> 23 #include <linux/of_net.h> 24 #include <linux/ethtool.h> 25 #include <linux/dm9000.h> 26 #include <linux/delay.h> 27 #include <linux/platform_device.h> 28 #include <linux/irq.h> 29 #include <linux/slab.h> 30 #include <linux/regulator/consumer.h> 31 #include <linux/gpio.h> 32 #include <linux/of_gpio.h> 33 34 #include <asm/delay.h> 35 #include <asm/irq.h> 36 #include <asm/io.h> 37 38 #include "dm9000.h" 39 40 /* Board/System/Debug information/definition ---------------- */ 41 42 #define DM9000_PHY 0x40 /* PHY address 0x01 */ 43 44 #define CARDNAME "dm9000" 45 46 /* 47 * Transmit timeout, default 5 seconds. 48 */ 49 static int watchdog = 5000; 50 module_param(watchdog, int, 0400); 51 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); 52 53 /* 54 * Debug messages level 55 */ 56 static int debug; 57 module_param(debug, int, 0644); 58 MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)"); 59 60 /* DM9000 register address locking. 61 * 62 * The DM9000 uses an address register to control where data written 63 * to the data register goes. This means that the address register 64 * must be preserved over interrupts or similar calls. 
65 * 66 * During interrupt and other critical calls, a spinlock is used to 67 * protect the system, but the calls themselves save the address 68 * in the address register in case they are interrupting another 69 * access to the device. 70 * 71 * For general accesses a lock is provided so that calls which are 72 * allowed to sleep are serialised so that the address register does 73 * not need to be saved. This lock also serves to serialise access 74 * to the EEPROM and PHY access registers which are shared between 75 * these two devices. 76 */ 77 78 /* The driver supports the original DM9000E, and now the two newer 79 * devices, DM9000A and DM9000B. 80 */ 81 82 enum dm9000_type { 83 TYPE_DM9000E, /* original DM9000 */ 84 TYPE_DM9000A, 85 TYPE_DM9000B 86 }; 87 88 /* Structure/enum declaration ------------------------------- */ 89 struct board_info { 90 91 void __iomem *io_addr; /* Register I/O base address */ 92 void __iomem *io_data; /* Data I/O address */ 93 u16 irq; /* IRQ */ 94 95 u16 tx_pkt_cnt; 96 u16 queue_pkt_len; 97 u16 queue_start_addr; 98 u16 queue_ip_summed; 99 u16 dbug_cnt; 100 u8 io_mode; /* 0:word, 2:byte */ 101 u8 phy_addr; 102 u8 imr_all; 103 104 unsigned int flags; 105 unsigned int in_timeout:1; 106 unsigned int in_suspend:1; 107 unsigned int wake_supported:1; 108 109 enum dm9000_type type; 110 111 void (*inblk)(void __iomem *port, void *data, int length); 112 void (*outblk)(void __iomem *port, void *data, int length); 113 void (*dumpblk)(void __iomem *port, int length); 114 115 struct device *dev; /* parent device */ 116 117 struct resource *addr_res; /* resources found */ 118 struct resource *data_res; 119 struct resource *addr_req; /* resources requested */ 120 struct resource *data_req; 121 122 int irq_wake; 123 124 struct mutex addr_lock; /* phy and eeprom access lock */ 125 126 struct delayed_work phy_poll; 127 struct net_device *ndev; 128 129 spinlock_t lock; 130 131 struct mii_if_info mii; 132 u32 msg_enable; 133 u32 wake_state; 134 135 int 
ip_summed; 136 }; 137 138 /* debug code */ 139 140 #define dm9000_dbg(db, lev, msg...) do { \ 141 if ((lev) < debug) { \ 142 dev_dbg(db->dev, msg); \ 143 } \ 144 } while (0) 145 146 static inline struct board_info *to_dm9000_board(struct net_device *dev) 147 { 148 return netdev_priv(dev); 149 } 150 151 /* DM9000 network board routine ---------------------------- */ 152 153 /* 154 * Read a byte from I/O port 155 */ 156 static u8 157 ior(struct board_info *db, int reg) 158 { 159 writeb(reg, db->io_addr); 160 return readb(db->io_data); 161 } 162 163 /* 164 * Write a byte to I/O port 165 */ 166 167 static void 168 iow(struct board_info *db, int reg, int value) 169 { 170 writeb(reg, db->io_addr); 171 writeb(value, db->io_data); 172 } 173 174 static void 175 dm9000_reset(struct board_info *db) 176 { 177 dev_dbg(db->dev, "resetting device\n"); 178 179 /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29 180 * The essential point is that we have to do a double reset, and the 181 * instruction is to set LBK into MAC internal loopback mode. 
182 */ 183 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK); 184 udelay(100); /* Application note says at least 20 us */ 185 if (ior(db, DM9000_NCR) & 1) 186 dev_err(db->dev, "dm9000 did not respond to first reset\n"); 187 188 iow(db, DM9000_NCR, 0); 189 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK); 190 udelay(100); 191 if (ior(db, DM9000_NCR) & 1) 192 dev_err(db->dev, "dm9000 did not respond to second reset\n"); 193 } 194 195 /* routines for sending block to chip */ 196 197 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count) 198 { 199 iowrite8_rep(reg, data, count); 200 } 201 202 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count) 203 { 204 iowrite16_rep(reg, data, (count+1) >> 1); 205 } 206 207 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count) 208 { 209 iowrite32_rep(reg, data, (count+3) >> 2); 210 } 211 212 /* input block from chip to memory */ 213 214 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count) 215 { 216 ioread8_rep(reg, data, count); 217 } 218 219 220 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count) 221 { 222 ioread16_rep(reg, data, (count+1) >> 1); 223 } 224 225 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count) 226 { 227 ioread32_rep(reg, data, (count+3) >> 2); 228 } 229 230 /* dump block from chip to null */ 231 232 static void dm9000_dumpblk_8bit(void __iomem *reg, int count) 233 { 234 int i; 235 236 for (i = 0; i < count; i++) 237 readb(reg); 238 } 239 240 static void dm9000_dumpblk_16bit(void __iomem *reg, int count) 241 { 242 int i; 243 244 count = (count + 1) >> 1; 245 246 for (i = 0; i < count; i++) 247 readw(reg); 248 } 249 250 static void dm9000_dumpblk_32bit(void __iomem *reg, int count) 251 { 252 int i; 253 254 count = (count + 3) >> 2; 255 256 for (i = 0; i < count; i++) 257 readl(reg); 258 } 259 260 /* 261 * Sleep, either by using msleep() or if we are suspending, then 262 * use mdelay() to sleep. 
263 */ 264 static void dm9000_msleep(struct board_info *db, unsigned int ms) 265 { 266 if (db->in_suspend || db->in_timeout) 267 mdelay(ms); 268 else 269 msleep(ms); 270 } 271 272 /* Read a word from phyxcer */ 273 static int 274 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) 275 { 276 struct board_info *db = netdev_priv(dev); 277 unsigned long flags; 278 unsigned int reg_save; 279 int ret; 280 281 mutex_lock(&db->addr_lock); 282 283 spin_lock_irqsave(&db->lock, flags); 284 285 /* Save previous register address */ 286 reg_save = readb(db->io_addr); 287 288 /* Fill the phyxcer register into REG_0C */ 289 iow(db, DM9000_EPAR, DM9000_PHY | reg); 290 291 /* Issue phyxcer read command */ 292 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); 293 294 writeb(reg_save, db->io_addr); 295 spin_unlock_irqrestore(&db->lock, flags); 296 297 dm9000_msleep(db, 1); /* Wait read complete */ 298 299 spin_lock_irqsave(&db->lock, flags); 300 reg_save = readb(db->io_addr); 301 302 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ 303 304 /* The read data keeps on REG_0D & REG_0E */ 305 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); 306 307 /* restore the previous address */ 308 writeb(reg_save, db->io_addr); 309 spin_unlock_irqrestore(&db->lock, flags); 310 311 mutex_unlock(&db->addr_lock); 312 313 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); 314 return ret; 315 } 316 317 /* Write a word to phyxcer */ 318 static void 319 dm9000_phy_write(struct net_device *dev, 320 int phyaddr_unused, int reg, int value) 321 { 322 struct board_info *db = netdev_priv(dev); 323 unsigned long flags; 324 unsigned long reg_save; 325 326 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); 327 if (!db->in_timeout) 328 mutex_lock(&db->addr_lock); 329 330 spin_lock_irqsave(&db->lock, flags); 331 332 /* Save previous register address */ 333 reg_save = readb(db->io_addr); 334 335 /* Fill the phyxcer register into REG_0C */ 336 iow(db, DM9000_EPAR, 
DM9000_PHY | reg); 337 338 /* Fill the written data into REG_0D & REG_0E */ 339 iow(db, DM9000_EPDRL, value); 340 iow(db, DM9000_EPDRH, value >> 8); 341 342 /* Issue phyxcer write command */ 343 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); 344 345 writeb(reg_save, db->io_addr); 346 spin_unlock_irqrestore(&db->lock, flags); 347 348 dm9000_msleep(db, 1); /* Wait write complete */ 349 350 spin_lock_irqsave(&db->lock, flags); 351 reg_save = readb(db->io_addr); 352 353 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ 354 355 /* restore the previous address */ 356 writeb(reg_save, db->io_addr); 357 358 spin_unlock_irqrestore(&db->lock, flags); 359 if (!db->in_timeout) 360 mutex_unlock(&db->addr_lock); 361 } 362 363 /* dm9000_set_io 364 * 365 * select the specified set of io routines to use with the 366 * device 367 */ 368 369 static void dm9000_set_io(struct board_info *db, int byte_width) 370 { 371 /* use the size of the data resource to work out what IO 372 * routines we want to use 373 */ 374 375 switch (byte_width) { 376 case 1: 377 db->dumpblk = dm9000_dumpblk_8bit; 378 db->outblk = dm9000_outblk_8bit; 379 db->inblk = dm9000_inblk_8bit; 380 break; 381 382 383 case 3: 384 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n"); 385 fallthrough; 386 case 2: 387 db->dumpblk = dm9000_dumpblk_16bit; 388 db->outblk = dm9000_outblk_16bit; 389 db->inblk = dm9000_inblk_16bit; 390 break; 391 392 case 4: 393 default: 394 db->dumpblk = dm9000_dumpblk_32bit; 395 db->outblk = dm9000_outblk_32bit; 396 db->inblk = dm9000_inblk_32bit; 397 break; 398 } 399 } 400 401 static void dm9000_schedule_poll(struct board_info *db) 402 { 403 if (db->type == TYPE_DM9000E) 404 schedule_delayed_work(&db->phy_poll, HZ * 2); 405 } 406 407 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 408 { 409 struct board_info *dm = to_dm9000_board(dev); 410 411 if (!netif_running(dev)) 412 return -EINVAL; 413 414 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, 
NULL); 415 } 416 417 static unsigned int 418 dm9000_read_locked(struct board_info *db, int reg) 419 { 420 unsigned long flags; 421 unsigned int ret; 422 423 spin_lock_irqsave(&db->lock, flags); 424 ret = ior(db, reg); 425 spin_unlock_irqrestore(&db->lock, flags); 426 427 return ret; 428 } 429 430 static int dm9000_wait_eeprom(struct board_info *db) 431 { 432 unsigned int status; 433 int timeout = 8; /* wait max 8msec */ 434 435 /* The DM9000 data sheets say we should be able to 436 * poll the ERRE bit in EPCR to wait for the EEPROM 437 * operation. From testing several chips, this bit 438 * does not seem to work. 439 * 440 * We attempt to use the bit, but fall back to the 441 * timeout (which is why we do not return an error 442 * on expiry) to say that the EEPROM operation has 443 * completed. 444 */ 445 446 while (1) { 447 status = dm9000_read_locked(db, DM9000_EPCR); 448 449 if ((status & EPCR_ERRE) == 0) 450 break; 451 452 msleep(1); 453 454 if (timeout-- < 0) { 455 dev_dbg(db->dev, "timeout waiting EEPROM\n"); 456 break; 457 } 458 } 459 460 return 0; 461 } 462 463 /* 464 * Read a word data from EEPROM 465 */ 466 static void 467 dm9000_read_eeprom(struct board_info *db, int offset, u8 *to) 468 { 469 unsigned long flags; 470 471 if (db->flags & DM9000_PLATF_NO_EEPROM) { 472 to[0] = 0xff; 473 to[1] = 0xff; 474 return; 475 } 476 477 mutex_lock(&db->addr_lock); 478 479 spin_lock_irqsave(&db->lock, flags); 480 481 iow(db, DM9000_EPAR, offset); 482 iow(db, DM9000_EPCR, EPCR_ERPRR); 483 484 spin_unlock_irqrestore(&db->lock, flags); 485 486 dm9000_wait_eeprom(db); 487 488 /* delay for at-least 150uS */ 489 msleep(1); 490 491 spin_lock_irqsave(&db->lock, flags); 492 493 iow(db, DM9000_EPCR, 0x0); 494 495 to[0] = ior(db, DM9000_EPDRL); 496 to[1] = ior(db, DM9000_EPDRH); 497 498 spin_unlock_irqrestore(&db->lock, flags); 499 500 mutex_unlock(&db->addr_lock); 501 } 502 503 /* 504 * Write a word data to SROM 505 */ 506 static void 507 dm9000_write_eeprom(struct board_info 
*db, int offset, u8 *data) 508 { 509 unsigned long flags; 510 511 if (db->flags & DM9000_PLATF_NO_EEPROM) 512 return; 513 514 mutex_lock(&db->addr_lock); 515 516 spin_lock_irqsave(&db->lock, flags); 517 iow(db, DM9000_EPAR, offset); 518 iow(db, DM9000_EPDRH, data[1]); 519 iow(db, DM9000_EPDRL, data[0]); 520 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW); 521 spin_unlock_irqrestore(&db->lock, flags); 522 523 dm9000_wait_eeprom(db); 524 525 mdelay(1); /* wait at least 150uS to clear */ 526 527 spin_lock_irqsave(&db->lock, flags); 528 iow(db, DM9000_EPCR, 0); 529 spin_unlock_irqrestore(&db->lock, flags); 530 531 mutex_unlock(&db->addr_lock); 532 } 533 534 /* ethtool ops */ 535 536 static void dm9000_get_drvinfo(struct net_device *dev, 537 struct ethtool_drvinfo *info) 538 { 539 struct board_info *dm = to_dm9000_board(dev); 540 541 strlcpy(info->driver, CARDNAME, sizeof(info->driver)); 542 strlcpy(info->bus_info, to_platform_device(dm->dev)->name, 543 sizeof(info->bus_info)); 544 } 545 546 static u32 dm9000_get_msglevel(struct net_device *dev) 547 { 548 struct board_info *dm = to_dm9000_board(dev); 549 550 return dm->msg_enable; 551 } 552 553 static void dm9000_set_msglevel(struct net_device *dev, u32 value) 554 { 555 struct board_info *dm = to_dm9000_board(dev); 556 557 dm->msg_enable = value; 558 } 559 560 static int dm9000_get_link_ksettings(struct net_device *dev, 561 struct ethtool_link_ksettings *cmd) 562 { 563 struct board_info *dm = to_dm9000_board(dev); 564 565 mii_ethtool_get_link_ksettings(&dm->mii, cmd); 566 return 0; 567 } 568 569 static int dm9000_set_link_ksettings(struct net_device *dev, 570 const struct ethtool_link_ksettings *cmd) 571 { 572 struct board_info *dm = to_dm9000_board(dev); 573 574 return mii_ethtool_set_link_ksettings(&dm->mii, cmd); 575 } 576 577 static int dm9000_nway_reset(struct net_device *dev) 578 { 579 struct board_info *dm = to_dm9000_board(dev); 580 return mii_nway_restart(&dm->mii); 581 } 582 583 static int 
dm9000_set_features(struct net_device *dev, 584 netdev_features_t features) 585 { 586 struct board_info *dm = to_dm9000_board(dev); 587 netdev_features_t changed = dev->features ^ features; 588 unsigned long flags; 589 590 if (!(changed & NETIF_F_RXCSUM)) 591 return 0; 592 593 spin_lock_irqsave(&dm->lock, flags); 594 iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); 595 spin_unlock_irqrestore(&dm->lock, flags); 596 597 return 0; 598 } 599 600 static u32 dm9000_get_link(struct net_device *dev) 601 { 602 struct board_info *dm = to_dm9000_board(dev); 603 u32 ret; 604 605 if (dm->flags & DM9000_PLATF_EXT_PHY) 606 ret = mii_link_ok(&dm->mii); 607 else 608 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0; 609 610 return ret; 611 } 612 613 #define DM_EEPROM_MAGIC (0x444D394B) 614 615 static int dm9000_get_eeprom_len(struct net_device *dev) 616 { 617 return 128; 618 } 619 620 static int dm9000_get_eeprom(struct net_device *dev, 621 struct ethtool_eeprom *ee, u8 *data) 622 { 623 struct board_info *dm = to_dm9000_board(dev); 624 int offset = ee->offset; 625 int len = ee->len; 626 int i; 627 628 /* EEPROM access is aligned to two bytes */ 629 630 if ((len & 1) != 0 || (offset & 1) != 0) 631 return -EINVAL; 632 633 if (dm->flags & DM9000_PLATF_NO_EEPROM) 634 return -ENOENT; 635 636 ee->magic = DM_EEPROM_MAGIC; 637 638 for (i = 0; i < len; i += 2) 639 dm9000_read_eeprom(dm, (offset + i) / 2, data + i); 640 641 return 0; 642 } 643 644 static int dm9000_set_eeprom(struct net_device *dev, 645 struct ethtool_eeprom *ee, u8 *data) 646 { 647 struct board_info *dm = to_dm9000_board(dev); 648 int offset = ee->offset; 649 int len = ee->len; 650 int done; 651 652 /* EEPROM access is aligned to two bytes */ 653 654 if (dm->flags & DM9000_PLATF_NO_EEPROM) 655 return -ENOENT; 656 657 if (ee->magic != DM_EEPROM_MAGIC) 658 return -EINVAL; 659 660 while (len > 0) { 661 if (len & 1 || offset & 1) { 662 int which = offset & 1; 663 u8 tmp[2]; 664 665 
dm9000_read_eeprom(dm, offset / 2, tmp); 666 tmp[which] = *data; 667 dm9000_write_eeprom(dm, offset / 2, tmp); 668 669 done = 1; 670 } else { 671 dm9000_write_eeprom(dm, offset / 2, data); 672 done = 2; 673 } 674 675 data += done; 676 offset += done; 677 len -= done; 678 } 679 680 return 0; 681 } 682 683 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) 684 { 685 struct board_info *dm = to_dm9000_board(dev); 686 687 memset(w, 0, sizeof(struct ethtool_wolinfo)); 688 689 /* note, we could probably support wake-phy too */ 690 w->supported = dm->wake_supported ? WAKE_MAGIC : 0; 691 w->wolopts = dm->wake_state; 692 } 693 694 static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) 695 { 696 struct board_info *dm = to_dm9000_board(dev); 697 unsigned long flags; 698 u32 opts = w->wolopts; 699 u32 wcr = 0; 700 701 if (!dm->wake_supported) 702 return -EOPNOTSUPP; 703 704 if (opts & ~WAKE_MAGIC) 705 return -EINVAL; 706 707 if (opts & WAKE_MAGIC) 708 wcr |= WCR_MAGICEN; 709 710 mutex_lock(&dm->addr_lock); 711 712 spin_lock_irqsave(&dm->lock, flags); 713 iow(dm, DM9000_WCR, wcr); 714 spin_unlock_irqrestore(&dm->lock, flags); 715 716 mutex_unlock(&dm->addr_lock); 717 718 if (dm->wake_state != opts) { 719 /* change in wol state, update IRQ state */ 720 721 if (!dm->wake_state) 722 irq_set_irq_wake(dm->irq_wake, 1); 723 else if (dm->wake_state && !opts) 724 irq_set_irq_wake(dm->irq_wake, 0); 725 } 726 727 dm->wake_state = opts; 728 return 0; 729 } 730 731 static const struct ethtool_ops dm9000_ethtool_ops = { 732 .get_drvinfo = dm9000_get_drvinfo, 733 .get_msglevel = dm9000_get_msglevel, 734 .set_msglevel = dm9000_set_msglevel, 735 .nway_reset = dm9000_nway_reset, 736 .get_link = dm9000_get_link, 737 .get_wol = dm9000_get_wol, 738 .set_wol = dm9000_set_wol, 739 .get_eeprom_len = dm9000_get_eeprom_len, 740 .get_eeprom = dm9000_get_eeprom, 741 .set_eeprom = dm9000_set_eeprom, 742 .get_link_ksettings = dm9000_get_link_ksettings, 743 
.set_link_ksettings = dm9000_set_link_ksettings, 744 }; 745 746 static void dm9000_show_carrier(struct board_info *db, 747 unsigned carrier, unsigned nsr) 748 { 749 int lpa; 750 struct net_device *ndev = db->ndev; 751 struct mii_if_info *mii = &db->mii; 752 unsigned ncr = dm9000_read_locked(db, DM9000_NCR); 753 754 if (carrier) { 755 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); 756 dev_info(db->dev, 757 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n", 758 ndev->name, (nsr & NSR_SPEED) ? 10 : 100, 759 (ncr & NCR_FDX) ? "full" : "half", lpa); 760 } else { 761 dev_info(db->dev, "%s: link down\n", ndev->name); 762 } 763 } 764 765 static void 766 dm9000_poll_work(struct work_struct *w) 767 { 768 struct delayed_work *dw = to_delayed_work(w); 769 struct board_info *db = container_of(dw, struct board_info, phy_poll); 770 struct net_device *ndev = db->ndev; 771 772 if (db->flags & DM9000_PLATF_SIMPLE_PHY && 773 !(db->flags & DM9000_PLATF_EXT_PHY)) { 774 unsigned nsr = dm9000_read_locked(db, DM9000_NSR); 775 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0; 776 unsigned new_carrier; 777 778 new_carrier = (nsr & NSR_LINKST) ? 
1 : 0; 779 780 if (old_carrier != new_carrier) { 781 if (netif_msg_link(db)) 782 dm9000_show_carrier(db, new_carrier, nsr); 783 784 if (!new_carrier) 785 netif_carrier_off(ndev); 786 else 787 netif_carrier_on(ndev); 788 } 789 } else 790 mii_check_media(&db->mii, netif_msg_link(db), 0); 791 792 if (netif_running(ndev)) 793 dm9000_schedule_poll(db); 794 } 795 796 /* dm9000_release_board 797 * 798 * release a board, and any mapped resources 799 */ 800 801 static void 802 dm9000_release_board(struct platform_device *pdev, struct board_info *db) 803 { 804 /* unmap our resources */ 805 806 iounmap(db->io_addr); 807 iounmap(db->io_data); 808 809 /* release the resources */ 810 811 if (db->data_req) 812 release_resource(db->data_req); 813 kfree(db->data_req); 814 815 if (db->addr_req) 816 release_resource(db->addr_req); 817 kfree(db->addr_req); 818 } 819 820 static unsigned char dm9000_type_to_char(enum dm9000_type type) 821 { 822 switch (type) { 823 case TYPE_DM9000E: return 'e'; 824 case TYPE_DM9000A: return 'a'; 825 case TYPE_DM9000B: return 'b'; 826 } 827 828 return '?'; 829 } 830 831 /* 832 * Set DM9000 multicast address 833 */ 834 static void 835 dm9000_hash_table_unlocked(struct net_device *dev) 836 { 837 struct board_info *db = netdev_priv(dev); 838 struct netdev_hw_addr *ha; 839 int i, oft; 840 u32 hash_val; 841 u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */ 842 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; 843 844 dm9000_dbg(db, 1, "entering %s\n", __func__); 845 846 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) 847 iow(db, oft, dev->dev_addr[i]); 848 849 if (dev->flags & IFF_PROMISC) 850 rcr |= RCR_PRMSC; 851 852 if (dev->flags & IFF_ALLMULTI) 853 rcr |= RCR_ALL; 854 855 /* the multicast address in Hash Table : 64 bits */ 856 netdev_for_each_mc_addr(ha, dev) { 857 hash_val = ether_crc_le(6, ha->addr) & 0x3f; 858 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 859 } 860 861 /* Write the hash table to MAC MD table */ 862 for (i = 0, 
oft = DM9000_MAR; i < 4; i++) { 863 iow(db, oft++, hash_table[i]); 864 iow(db, oft++, hash_table[i] >> 8); 865 } 866 867 iow(db, DM9000_RCR, rcr); 868 } 869 870 static void 871 dm9000_hash_table(struct net_device *dev) 872 { 873 struct board_info *db = netdev_priv(dev); 874 unsigned long flags; 875 876 spin_lock_irqsave(&db->lock, flags); 877 dm9000_hash_table_unlocked(dev); 878 spin_unlock_irqrestore(&db->lock, flags); 879 } 880 881 static void 882 dm9000_mask_interrupts(struct board_info *db) 883 { 884 iow(db, DM9000_IMR, IMR_PAR); 885 } 886 887 static void 888 dm9000_unmask_interrupts(struct board_info *db) 889 { 890 iow(db, DM9000_IMR, db->imr_all); 891 } 892 893 /* 894 * Initialize dm9000 board 895 */ 896 static void 897 dm9000_init_dm9000(struct net_device *dev) 898 { 899 struct board_info *db = netdev_priv(dev); 900 unsigned int imr; 901 unsigned int ncr; 902 903 dm9000_dbg(db, 1, "entering %s\n", __func__); 904 905 dm9000_reset(db); 906 dm9000_mask_interrupts(db); 907 908 /* I/O mode */ 909 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ 910 911 /* Checksum mode */ 912 if (dev->hw_features & NETIF_F_RXCSUM) 913 iow(db, DM9000_RCSR, 914 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); 915 916 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 917 iow(db, DM9000_GPR, 0); 918 919 /* If we are dealing with DM9000B, some extra steps are required: a 920 * manual phy reset, and setting init params. 921 */ 922 if (db->type == TYPE_DM9000B) { 923 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); 924 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); 925 } 926 927 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 928 929 /* if wol is needed, then always set NCR_WAKEEN otherwise we end 930 * up dumping the wake events if we disable this. 
There is already 931 * a wake-mask in DM9000_WCR */ 932 if (db->wake_supported) 933 ncr |= NCR_WAKEEN; 934 935 iow(db, DM9000_NCR, ncr); 936 937 /* Program operating register */ 938 iow(db, DM9000_TCR, 0); /* TX Polling clear */ 939 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */ 940 iow(db, DM9000_FCR, 0xff); /* Flow Control */ 941 iow(db, DM9000_SMCR, 0); /* Special Mode */ 942 /* clear TX status */ 943 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END); 944 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */ 945 946 /* Set address filter table */ 947 dm9000_hash_table_unlocked(dev); 948 949 imr = IMR_PAR | IMR_PTM | IMR_PRM; 950 if (db->type != TYPE_DM9000E) 951 imr |= IMR_LNKCHNG; 952 953 db->imr_all = imr; 954 955 /* Init Driver variable */ 956 db->tx_pkt_cnt = 0; 957 db->queue_pkt_len = 0; 958 netif_trans_update(dev); 959 } 960 961 /* Our watchdog timed out. Called by the networking layer */ 962 static void dm9000_timeout(struct net_device *dev, unsigned int txqueue) 963 { 964 struct board_info *db = netdev_priv(dev); 965 u8 reg_save; 966 unsigned long flags; 967 968 /* Save previous register address */ 969 spin_lock_irqsave(&db->lock, flags); 970 db->in_timeout = 1; 971 reg_save = readb(db->io_addr); 972 973 netif_stop_queue(dev); 974 dm9000_init_dm9000(dev); 975 dm9000_unmask_interrupts(db); 976 /* We can accept TX packets again */ 977 netif_trans_update(dev); /* prevent tx timeout */ 978 netif_wake_queue(dev); 979 980 /* Restore previous register address */ 981 writeb(reg_save, db->io_addr); 982 db->in_timeout = 0; 983 spin_unlock_irqrestore(&db->lock, flags); 984 } 985 986 static void dm9000_send_packet(struct net_device *dev, 987 int ip_summed, 988 u16 pkt_len) 989 { 990 struct board_info *dm = to_dm9000_board(dev); 991 992 /* The DM9000 is not smart enough to leave fragmented packets alone. 
*/ 993 if (dm->ip_summed != ip_summed) { 994 if (ip_summed == CHECKSUM_NONE) 995 iow(dm, DM9000_TCCR, 0); 996 else 997 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP); 998 dm->ip_summed = ip_summed; 999 } 1000 1001 /* Set TX length to DM9000 */ 1002 iow(dm, DM9000_TXPLL, pkt_len); 1003 iow(dm, DM9000_TXPLH, pkt_len >> 8); 1004 1005 /* Issue TX polling command */ 1006 iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */ 1007 } 1008 1009 /* 1010 * Hardware start transmission. 1011 * Send a packet to media from the upper layer. 1012 */ 1013 static int 1014 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) 1015 { 1016 unsigned long flags; 1017 struct board_info *db = netdev_priv(dev); 1018 1019 dm9000_dbg(db, 3, "%s:\n", __func__); 1020 1021 if (db->tx_pkt_cnt > 1) 1022 return NETDEV_TX_BUSY; 1023 1024 spin_lock_irqsave(&db->lock, flags); 1025 1026 /* Move data to DM9000 TX RAM */ 1027 writeb(DM9000_MWCMD, db->io_addr); 1028 1029 (db->outblk)(db->io_data, skb->data, skb->len); 1030 dev->stats.tx_bytes += skb->len; 1031 1032 db->tx_pkt_cnt++; 1033 /* TX control: First packet immediately send, second packet queue */ 1034 if (db->tx_pkt_cnt == 1) { 1035 dm9000_send_packet(dev, skb->ip_summed, skb->len); 1036 } else { 1037 /* Second packet */ 1038 db->queue_pkt_len = skb->len; 1039 db->queue_ip_summed = skb->ip_summed; 1040 netif_stop_queue(dev); 1041 } 1042 1043 spin_unlock_irqrestore(&db->lock, flags); 1044 1045 /* free this SKB */ 1046 dev_consume_skb_any(skb); 1047 1048 return NETDEV_TX_OK; 1049 } 1050 1051 /* 1052 * DM9000 interrupt handler 1053 * receive the packet to upper layer, free the transmitted packet 1054 */ 1055 1056 static void dm9000_tx_done(struct net_device *dev, struct board_info *db) 1057 { 1058 int tx_status = ior(db, DM9000_NSR); /* Got TX status */ 1059 1060 if (tx_status & (NSR_TX2END | NSR_TX1END)) { 1061 /* One packet sent complete */ 1062 db->tx_pkt_cnt--; 1063 dev->stats.tx_packets++; 1064 1065 if 
(netif_msg_tx_done(db)) 1066 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status); 1067 1068 /* Queue packet check & send */ 1069 if (db->tx_pkt_cnt > 0) 1070 dm9000_send_packet(dev, db->queue_ip_summed, 1071 db->queue_pkt_len); 1072 netif_wake_queue(dev); 1073 } 1074 } 1075 1076 struct dm9000_rxhdr { 1077 u8 RxPktReady; 1078 u8 RxStatus; 1079 __le16 RxLen; 1080 } __packed; 1081 1082 /* 1083 * Received a packet and pass to upper layer 1084 */ 1085 static void 1086 dm9000_rx(struct net_device *dev) 1087 { 1088 struct board_info *db = netdev_priv(dev); 1089 struct dm9000_rxhdr rxhdr; 1090 struct sk_buff *skb; 1091 u8 rxbyte, *rdptr; 1092 bool GoodPacket; 1093 int RxLen; 1094 1095 /* Check packet ready or not */ 1096 do { 1097 ior(db, DM9000_MRCMDX); /* Dummy read */ 1098 1099 /* Get most updated data */ 1100 rxbyte = readb(db->io_data); 1101 1102 /* Status check: this byte must be 0 or 1 */ 1103 if (rxbyte & DM9000_PKT_ERR) { 1104 dev_warn(db->dev, "status check fail: %d\n", rxbyte); 1105 iow(db, DM9000_RCR, 0x00); /* Stop Device */ 1106 return; 1107 } 1108 1109 if (!(rxbyte & DM9000_PKT_RDY)) 1110 return; 1111 1112 /* A packet ready now & Get status/length */ 1113 GoodPacket = true; 1114 writeb(DM9000_MRCMD, db->io_addr); 1115 1116 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr)); 1117 1118 RxLen = le16_to_cpu(rxhdr.RxLen); 1119 1120 if (netif_msg_rx_status(db)) 1121 dev_dbg(db->dev, "RX: status %02x, length %04x\n", 1122 rxhdr.RxStatus, RxLen); 1123 1124 /* Packet Status check */ 1125 if (RxLen < 0x40) { 1126 GoodPacket = false; 1127 if (netif_msg_rx_err(db)) 1128 dev_dbg(db->dev, "RX: Bad Packet (runt)\n"); 1129 } 1130 1131 if (RxLen > DM9000_PKT_MAX) { 1132 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen); 1133 } 1134 1135 /* rxhdr.RxStatus is identical to RSR register. 
		 */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			/* any error bit set means the frame is bad; count
			 * the individually interesting ones */
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from DM9000 */
		if (GoodPacket &&
		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
			/* 2 byte headroom keeps the IP header word aligned */
			skb_reserve(skb, 2);
			/* RxLen includes the trailing 4 byte CRC, which is
			 * read from the chip but not passed up the stack */
			rdptr = skb_put(skb, RxLen - 4);

			/* Read received packet from RX SRAM */

			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			if (dev->features & NETIF_F_RXCSUM) {
				/* NOTE(review): presumably the checksum-fail
				 * status bits of rxbyte are tested here —
				 * confirm against the DM9000A/B datasheet */
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* need to dump the packet's data: the chip's read
			 * pointer must still be advanced past the frame */

			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}

/* Normal interrupt handler.
 *
 * The DM9000's index register selects which register the data window
 * accesses, so the handler saves it on entry and restores it on exit
 * in case it interrupted another register access. All interrupts are
 * masked while the ISR causes are serviced, then unmasked again.
 */
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* A real interrupt coming */

	/* holders of db->lock must always block IRQs */
	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	dm9000_mask_interrupts(db);
	/* Got DM9000 interrupt status */
	int_status = ior(db, DM9000_ISR);	/* Got ISR */
	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* Received the coming packet */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* Transmit Interrupt check */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	/* only the newer DM9000A/B report link changes in the ISR */
	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change request */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	dm9000_unmask_interrupts(db);
	/* Restore previous register address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}

/* Wake-on-LAN interrupt handler (optional second IRQ).
 *
 * Reads the wakeup cause from NSR/WCR, logs it, and acknowledges the
 * wake event. Returns IRQ_NONE when no wake event was pending, as the
 * line is requested IRQF_SHARED.
 */
static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		/* clear, so we can avoid */
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST)
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole: run the interrupt handler with the device's IRQ
 * disabled so it cannot race a real interrupt.
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/*
 * Open the interface.
 * The interface is opened whenever "ifconfig" activates it.
 */
static int
dm9000_open(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned int irq_flags = irq_get_trigger_type(dev->irq);

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	/* If there is no IRQ type specified, tell the user that this is a
	 * problem
	 */
	if (irq_flags == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	irq_flags |= IRQF_SHARED;

	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
	mdelay(1);		/* delay needed by DM9000B */

	/* Initialize DM9000 board */
	dm9000_init_dm9000(dev);

	if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
		return -EAGAIN;
	/* Now that we have an interrupt handler hooked up we can unmask
	 * our interrupts
	 */
	dm9000_unmask_interrupts(db);

	/* Init driver variable */
	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	/* Poll initial link status */
	schedule_delayed_work(&db->phy_poll, 1);

	return 0;
}

/* Power down the chip: reset the PHY, mask all interrupts and
 * disable the receiver. Used on stop, suspend and timeout paths.
 */
static void
dm9000_shutdown(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);

	/* RESET device */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
	dm9000_mask_interrupts(db);
	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
}

/*
 * Stop the interface.
 * The interface is stopped when it is brought down.
 */
static int
dm9000_stop(struct net_device *ndev)
{
	struct board_info *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	/* stop the link poller before tearing anything else down */
	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	/* free interrupt */
	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}

static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};

/* Build platform data from the device-tree node.
 *
 * Returns a devm-allocated dm9000_plat_data, or an ERR_PTR:
 * -ENXIO when there is no OF node, -ENOMEM on allocation failure,
 * or -EPROBE_DEFER propagated from of_get_mac_address().
 */
static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
{
	struct dm9000_plat_data *pdata;
	struct device_node *np = dev->of_node;
	const void *mac_addr;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return ERR_PTR(-ENXIO);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	if (of_find_property(np, "davicom,ext-phy", NULL))
		pdata->flags |= DM9000_PLATF_EXT_PHY;
	if (of_find_property(np, "davicom,no-eeprom", NULL))
		pdata->flags |= DM9000_PLATF_NO_EEPROM;

	mac_addr = of_get_mac_address(np);
	if (!IS_ERR(mac_addr))
		ether_addr_copy(pdata->dev_addr, mac_addr);
	else if (PTR_ERR(mac_addr) == -EPROBE_DEFER)
		return ERR_CAST(mac_addr);

	return pdata;
}

/*
 * Search DM9000 board, allocate space and register it
 */
1412 static int 1413 dm9000_probe(struct platform_device *pdev) 1414 { 1415 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev); 1416 struct board_info *db; /* Point a board information structure */ 1417 struct net_device *ndev; 1418 struct device *dev = &pdev->dev; 1419 const unsigned char *mac_src; 1420 int ret = 0; 1421 int iosize; 1422 int i; 1423 u32 id_val; 1424 int reset_gpios; 1425 enum of_gpio_flags flags; 1426 struct regulator *power; 1427 bool inv_mac_addr = false; 1428 1429 power = devm_regulator_get(dev, "vcc"); 1430 if (IS_ERR(power)) { 1431 if (PTR_ERR(power) == -EPROBE_DEFER) 1432 return -EPROBE_DEFER; 1433 dev_dbg(dev, "no regulator provided\n"); 1434 } else { 1435 ret = regulator_enable(power); 1436 if (ret != 0) { 1437 dev_err(dev, 1438 "Failed to enable power regulator: %d\n", ret); 1439 return ret; 1440 } 1441 dev_dbg(dev, "regulator enabled\n"); 1442 } 1443 1444 reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, 1445 &flags); 1446 if (gpio_is_valid(reset_gpios)) { 1447 ret = devm_gpio_request_one(dev, reset_gpios, flags, 1448 "dm9000_reset"); 1449 if (ret) { 1450 dev_err(dev, "failed to request reset gpio %d: %d\n", 1451 reset_gpios, ret); 1452 return -ENODEV; 1453 } 1454 1455 /* According to manual PWRST# Low Period Min 1ms */ 1456 msleep(2); 1457 gpio_set_value(reset_gpios, 1); 1458 /* Needs 3ms to read eeprom when PWRST is deasserted */ 1459 msleep(4); 1460 } 1461 1462 if (!pdata) { 1463 pdata = dm9000_parse_dt(&pdev->dev); 1464 if (IS_ERR(pdata)) 1465 return PTR_ERR(pdata); 1466 } 1467 1468 /* Init network device */ 1469 ndev = alloc_etherdev(sizeof(struct board_info)); 1470 if (!ndev) 1471 return -ENOMEM; 1472 1473 SET_NETDEV_DEV(ndev, &pdev->dev); 1474 1475 dev_dbg(&pdev->dev, "dm9000_probe()\n"); 1476 1477 /* setup board info structure */ 1478 db = netdev_priv(ndev); 1479 1480 db->dev = &pdev->dev; 1481 db->ndev = ndev; 1482 1483 spin_lock_init(&db->lock); 1484 mutex_init(&db->addr_lock); 1485 1486 
INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work); 1487 1488 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1489 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1490 1491 if (!db->addr_res || !db->data_res) { 1492 dev_err(db->dev, "insufficient resources addr=%p data=%p\n", 1493 db->addr_res, db->data_res); 1494 ret = -ENOENT; 1495 goto out; 1496 } 1497 1498 ndev->irq = platform_get_irq(pdev, 0); 1499 if (ndev->irq < 0) { 1500 ret = ndev->irq; 1501 goto out; 1502 } 1503 1504 db->irq_wake = platform_get_irq(pdev, 1); 1505 if (db->irq_wake >= 0) { 1506 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); 1507 1508 ret = request_irq(db->irq_wake, dm9000_wol_interrupt, 1509 IRQF_SHARED, dev_name(db->dev), ndev); 1510 if (ret) { 1511 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret); 1512 } else { 1513 1514 /* test to see if irq is really wakeup capable */ 1515 ret = irq_set_irq_wake(db->irq_wake, 1); 1516 if (ret) { 1517 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", 1518 db->irq_wake, ret); 1519 ret = 0; 1520 } else { 1521 irq_set_irq_wake(db->irq_wake, 0); 1522 db->wake_supported = 1; 1523 } 1524 } 1525 } 1526 1527 iosize = resource_size(db->addr_res); 1528 db->addr_req = request_mem_region(db->addr_res->start, iosize, 1529 pdev->name); 1530 1531 if (db->addr_req == NULL) { 1532 dev_err(db->dev, "cannot claim address reg area\n"); 1533 ret = -EIO; 1534 goto out; 1535 } 1536 1537 db->io_addr = ioremap(db->addr_res->start, iosize); 1538 1539 if (db->io_addr == NULL) { 1540 dev_err(db->dev, "failed to ioremap address reg\n"); 1541 ret = -EINVAL; 1542 goto out; 1543 } 1544 1545 iosize = resource_size(db->data_res); 1546 db->data_req = request_mem_region(db->data_res->start, iosize, 1547 pdev->name); 1548 1549 if (db->data_req == NULL) { 1550 dev_err(db->dev, "cannot claim data reg area\n"); 1551 ret = -EIO; 1552 goto out; 1553 } 1554 1555 db->io_data = ioremap(db->data_res->start, iosize); 1556 1557 if (db->io_data == NULL) { 1558 
dev_err(db->dev, "failed to ioremap data reg\n"); 1559 ret = -EINVAL; 1560 goto out; 1561 } 1562 1563 /* fill in parameters for net-dev structure */ 1564 ndev->base_addr = (unsigned long)db->io_addr; 1565 1566 /* ensure at least we have a default set of IO routines */ 1567 dm9000_set_io(db, iosize); 1568 1569 /* check to see if anything is being over-ridden */ 1570 if (pdata != NULL) { 1571 /* check to see if the driver wants to over-ride the 1572 * default IO width */ 1573 1574 if (pdata->flags & DM9000_PLATF_8BITONLY) 1575 dm9000_set_io(db, 1); 1576 1577 if (pdata->flags & DM9000_PLATF_16BITONLY) 1578 dm9000_set_io(db, 2); 1579 1580 if (pdata->flags & DM9000_PLATF_32BITONLY) 1581 dm9000_set_io(db, 4); 1582 1583 /* check to see if there are any IO routine 1584 * over-rides */ 1585 1586 if (pdata->inblk != NULL) 1587 db->inblk = pdata->inblk; 1588 1589 if (pdata->outblk != NULL) 1590 db->outblk = pdata->outblk; 1591 1592 if (pdata->dumpblk != NULL) 1593 db->dumpblk = pdata->dumpblk; 1594 1595 db->flags = pdata->flags; 1596 } 1597 1598 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL 1599 db->flags |= DM9000_PLATF_SIMPLE_PHY; 1600 #endif 1601 1602 dm9000_reset(db); 1603 1604 /* try multiple times, DM9000 sometimes gets the read wrong */ 1605 for (i = 0; i < 8; i++) { 1606 id_val = ior(db, DM9000_VIDL); 1607 id_val |= (u32)ior(db, DM9000_VIDH) << 8; 1608 id_val |= (u32)ior(db, DM9000_PIDL) << 16; 1609 id_val |= (u32)ior(db, DM9000_PIDH) << 24; 1610 1611 if (id_val == DM9000_ID) 1612 break; 1613 dev_err(db->dev, "read wrong id 0x%08x\n", id_val); 1614 } 1615 1616 if (id_val != DM9000_ID) { 1617 dev_err(db->dev, "wrong id: 0x%08x\n", id_val); 1618 ret = -ENODEV; 1619 goto out; 1620 } 1621 1622 /* Identify what type of DM9000 we are working on */ 1623 1624 id_val = ior(db, DM9000_CHIPR); 1625 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val); 1626 1627 switch (id_val) { 1628 case CHIPR_DM9000A: 1629 db->type = TYPE_DM9000A; 1630 break; 1631 case CHIPR_DM9000B: 1632 
db->type = TYPE_DM9000B; 1633 break; 1634 default: 1635 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val); 1636 db->type = TYPE_DM9000E; 1637 } 1638 1639 /* dm9000a/b are capable of hardware checksum offload */ 1640 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) { 1641 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 1642 ndev->features |= ndev->hw_features; 1643 } 1644 1645 /* from this point we assume that we have found a DM9000 */ 1646 1647 ndev->netdev_ops = &dm9000_netdev_ops; 1648 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 1649 ndev->ethtool_ops = &dm9000_ethtool_ops; 1650 1651 db->msg_enable = NETIF_MSG_LINK; 1652 db->mii.phy_id_mask = 0x1f; 1653 db->mii.reg_num_mask = 0x1f; 1654 db->mii.force_media = 0; 1655 db->mii.full_duplex = 0; 1656 db->mii.dev = ndev; 1657 db->mii.mdio_read = dm9000_phy_read; 1658 db->mii.mdio_write = dm9000_phy_write; 1659 1660 mac_src = "eeprom"; 1661 1662 /* try reading the node address from the attached EEPROM */ 1663 for (i = 0; i < 6; i += 2) 1664 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); 1665 1666 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { 1667 mac_src = "platform data"; 1668 memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN); 1669 } 1670 1671 if (!is_valid_ether_addr(ndev->dev_addr)) { 1672 /* try reading from mac */ 1673 1674 mac_src = "chip"; 1675 for (i = 0; i < 6; i++) 1676 ndev->dev_addr[i] = ior(db, i+DM9000_PAR); 1677 } 1678 1679 if (!is_valid_ether_addr(ndev->dev_addr)) { 1680 inv_mac_addr = true; 1681 eth_hw_addr_random(ndev); 1682 mac_src = "random"; 1683 } 1684 1685 1686 platform_set_drvdata(pdev, ndev); 1687 ret = register_netdev(ndev); 1688 1689 if (ret == 0) { 1690 if (inv_mac_addr) 1691 dev_warn(db->dev, "%s: Invalid ethernet MAC address. 
Please set using ip\n", 1692 ndev->name); 1693 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n", 1694 ndev->name, dm9000_type_to_char(db->type), 1695 db->io_addr, db->io_data, ndev->irq, 1696 ndev->dev_addr, mac_src); 1697 } 1698 return 0; 1699 1700 out: 1701 dev_err(db->dev, "not found (%d).\n", ret); 1702 1703 dm9000_release_board(pdev, db); 1704 free_netdev(ndev); 1705 1706 return ret; 1707 } 1708 1709 static int 1710 dm9000_drv_suspend(struct device *dev) 1711 { 1712 struct net_device *ndev = dev_get_drvdata(dev); 1713 struct board_info *db; 1714 1715 if (ndev) { 1716 db = netdev_priv(ndev); 1717 db->in_suspend = 1; 1718 1719 if (!netif_running(ndev)) 1720 return 0; 1721 1722 netif_device_detach(ndev); 1723 1724 /* only shutdown if not using WoL */ 1725 if (!db->wake_state) 1726 dm9000_shutdown(ndev); 1727 } 1728 return 0; 1729 } 1730 1731 static int 1732 dm9000_drv_resume(struct device *dev) 1733 { 1734 struct net_device *ndev = dev_get_drvdata(dev); 1735 struct board_info *db = netdev_priv(ndev); 1736 1737 if (ndev) { 1738 if (netif_running(ndev)) { 1739 /* reset if we were not in wake mode to ensure if 1740 * the device was powered off it is in a known state */ 1741 if (!db->wake_state) { 1742 dm9000_init_dm9000(ndev); 1743 dm9000_unmask_interrupts(db); 1744 } 1745 1746 netif_device_attach(ndev); 1747 } 1748 1749 db->in_suspend = 0; 1750 } 1751 return 0; 1752 } 1753 1754 static const struct dev_pm_ops dm9000_drv_pm_ops = { 1755 .suspend = dm9000_drv_suspend, 1756 .resume = dm9000_drv_resume, 1757 }; 1758 1759 static int 1760 dm9000_drv_remove(struct platform_device *pdev) 1761 { 1762 struct net_device *ndev = platform_get_drvdata(pdev); 1763 1764 unregister_netdev(ndev); 1765 dm9000_release_board(pdev, netdev_priv(ndev)); 1766 free_netdev(ndev); /* free device structure */ 1767 1768 dev_dbg(&pdev->dev, "released and freed device\n"); 1769 return 0; 1770 } 1771 1772 #ifdef CONFIG_OF 1773 static const struct of_device_id dm9000_of_matches[] = { 
1774 { .compatible = "davicom,dm9000", }, 1775 { /* sentinel */ } 1776 }; 1777 MODULE_DEVICE_TABLE(of, dm9000_of_matches); 1778 #endif 1779 1780 static struct platform_driver dm9000_driver = { 1781 .driver = { 1782 .name = "dm9000", 1783 .pm = &dm9000_drv_pm_ops, 1784 .of_match_table = of_match_ptr(dm9000_of_matches), 1785 }, 1786 .probe = dm9000_probe, 1787 .remove = dm9000_drv_remove, 1788 }; 1789 1790 module_platform_driver(dm9000_driver); 1791 1792 MODULE_AUTHOR("Sascha Hauer, Ben Dooks"); 1793 MODULE_DESCRIPTION("Davicom DM9000 network driver"); 1794 MODULE_LICENSE("GPL"); 1795 MODULE_ALIAS("platform:dm9000"); 1796