/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 * Copyright (C) 1997 Sten Wang
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
 *
 * Additional updates, Copyright:
 *	Ben Dooks <ben@simtec.co.uk>
 *	Sascha Hauer <s.hauer@pengutronix.de>
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

/* Board/System/Debug information/definition ---------------- */

#define DM9000_PHY		0x40	/* PHY address 0x01 */

#define CARDNAME	"dm9000"
#define DRV_VERSION	"1.31"

/*
 * Transmit timeout, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/*
 * Debug messages level
 */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");

/* DM9000 register address locking.
 *
 * The DM9000 uses an address register to control where data written
 * to the data register goes. This means that the address register
 * must be preserved over interrupts or similar calls.
 *
 * During interrupt and other critical calls, a spinlock is used to
 * protect the system, but the calls themselves save the address
 * in the address register in case they are interrupting another
 * access to the device.
 *
 * For general accesses a lock is provided so that calls which are
 * allowed to sleep are serialised so that the address register does
 * not need to be saved. This lock also serves to serialise access
 * to the EEPROM and PHY access registers which are shared between
 * these two devices.
 */

/* The driver supports the original DM9000E, and now the two newer
 * devices, DM9000A and DM9000B.
 */

enum dm9000_type {
	TYPE_DM9000E,	/* original DM9000 */
	TYPE_DM9000A,
	TYPE_DM9000B
};

/* Structure/enum declaration ------------------------------- */
struct board_info {

	void __iomem	*io_addr;	/* Register I/O base address */
	void __iomem	*io_data;	/* Data I/O address */
	u16		irq;		/* IRQ */

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;	/* 0:word, 2:byte */
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_timeout:1;
	unsigned int	in_suspend:1;
	unsigned int	wake_supported:1;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;		/* parent device */

	struct resource	*addr_res;	/* resources found */
	struct resource	*data_res;
	struct resource	*addr_req;	/* resources requested */
	struct resource	*data_req;

	int		irq_wake;

	struct mutex	addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;
	struct net_device  *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		ip_summed;
};

/* debug code */

#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < debug) {				\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)

static inline struct board_info *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

/* DM9000 network board routine ---------------------------- */

/*
 * Read a byte from I/O port
 */
static u8
ior(struct board_info *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

/*
 * Write a byte to I/O port
 */

static void
iow(struct board_info *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}
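
/* Every register access on this chip is a two-cycle operation on the
 * index/data port pair: write the register number to io_addr, then read or
 * write the value through io_data. As an illustrative sketch (not additional
 * driver code), reading the chip revision by hand would be:
 *
 *	writeb(DM9000_CHIPR, db->io_addr);	// select the register
 *	rev = readb(db->io_data);		// transfer its value
 *
 * which is exactly what ior(db, DM9000_CHIPR) wraps, with iow() as the
 * write-side equivalent. This is also why callers that can interrupt another
 * access save and restore the byte currently latched in io_addr.
 */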

static void
dm9000_reset(struct board_info *db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
	 * The essential point is that we have to do a double reset, and the
	 * instruction is to set LBK into MAC internal loopback mode.
	 */
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100); /* Application note says at least 20 us */
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to first reset\n");

	iow(db, DM9000_NCR, 0);
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to second reset\n");
}

/* routines for sending block to chip */

static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count+3) >> 2);
}

/* input block from chip to memory */

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}


static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count+3) >> 2);
}

/* dump block from chip to null */

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	for (i = 0; i < count; i++)
		tmp = readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		tmp = readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		tmp = readl(reg);
}
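
/* Note on the 16/32-bit variants above: counts are byte counts, so the wider
 * accessors round up to whole bus transfers ((count + 1) >> 1 words or
 * (count + 3) >> 2 longwords) rather than transferring exactly count bytes.
 */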

/*
 * Sleep, either by using msleep() or if we are suspending, then
 * use mdelay() to sleep.
 */
static void dm9000_msleep(struct board_info *db, unsigned int ms)
{
	if (db->in_suspend || db->in_timeout)
		mdelay(ms);
	else
		msleep(ms);
}

/* Read a word from phyxcer */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Issue phyxcer read command */
	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait read complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */

	/* The read data keeps on REG_0D & REG_0E */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}

/* Write a word to phyxcer */
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	if (!db->in_timeout)
		mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Fill the written data into REG_0D & REG_0E */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	/* Issue phyxcer write command */
	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait write complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */

	/* restore the previous address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	if (!db->in_timeout)
		mutex_unlock(&db->addr_lock);
}
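
/* Both PHY accessors above drive the same internal MII handshake: the target
 * register goes into EPAR (with DM9000_PHY marking a PHY rather than EEPROM
 * access), EPCR is written with EPOS plus the read or write strobe, the code
 * waits roughly 1ms for the cycle to finish, and the 16-bit value travels
 * through the EPDRL/EPDRH pair. The spinlocked sections save and restore
 * io_addr so an interrupted register access is not corrupted, while addr_lock
 * keeps sleeping PHY/EEPROM users serialised.
 */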

/* dm9000_set_io
 *
 * select the specified set of io routines to use with the
 * device
 */

static void dm9000_set_io(struct board_info *db, int byte_width)
{
	/* use the size of the data resource to work out what IO
	 * routines we want to use
	 */

	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;


	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}

static void dm9000_schedule_poll(struct board_info *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

static unsigned int
dm9000_read_locked(struct board_info *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}

static int dm9000_wait_eeprom(struct board_info *db)
{
	unsigned int status;
	int timeout = 8;	/* wait max 8msec */

	/* The DM9000 data sheets say we should be able to
	 * poll the ERRE bit in EPCR to wait for the EEPROM
	 * operation. From testing several chips, this bit
	 * does not seem to work.
	 *
	 * We attempt to use the bit, but fall back to the
	 * timeout (which is why we do not return an error
	 * on expiry) to say that the EEPROM operation has
	 * completed.
	 */

	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting EEPROM\n");
			break;
		}
	}

	return 0;
}

/*
 * Read a word of data from the EEPROM
 */
static void
dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	/* delay for at-least 150uS */
	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

/*
 * Write a word of data to the SROM (EEPROM)
 */
static void
dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);	/* wait at least 150uS to clear */

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}
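
/* The EEPROM helpers above work on 16-bit words: "offset" is a word index
 * written to EPAR, EPCR_ERPRR or EPCR_ERPRW (plus EPCR_WEP for writes) starts
 * the cycle, and the data moves through EPDRL/EPDRH. dm9000_wait_eeprom()
 * polls EPCR_ERRE but, as noted there, also gives up after a timeout because
 * the bit is unreliable on some chips.
 */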

/* ethtool ops */

static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct board_info *dm = to_dm9000_board(dev);

	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
		sizeof(info->bus_info));
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}

static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	struct board_info *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	mii_ethtool_gset(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	return mii_ethtool_sset(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);

	return mii_nway_restart(&dm->mii);
}

static int dm9000_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	struct board_info *dm = to_dm9000_board(dev);
	netdev_features_t changed = dev->features ^ features;
	unsigned long flags;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
	spin_unlock_irqrestore(&dm->lock, flags);

	return 0;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}

#define DM_EEPROM_MAGIC		(0x444D394B)

static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	/* EEPROM access is aligned to two bytes */

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	/* EEPROM access is aligned to two bytes */

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	while (len > 0) {
		if (len & 1 || offset & 1) {
			int which = offset & 1;
			u8 tmp[2];

			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}
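
/* Because the device only does word-wide EEPROM cycles, dm9000_set_eeprom()
 * handles a byte-granular ethtool request by falling back to a
 * read-modify-write of the containing word whenever the current offset or
 * remaining length is odd, advancing one byte at a time in that case and two
 * bytes otherwise.
 */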

static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	/* note, we could probably support wake-phy too */
	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}

static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* change in wol state, update IRQ state */

		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}

static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_settings		= dm9000_get_settings,
	.set_settings		= dm9000_set_settings,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
};
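
/* Link management differs by chip revision: the original DM9000E has no
 * link-change interrupt, so dm9000_schedule_poll() re-arms the phy_poll work
 * every two seconds for it, while the DM9000A/B enable IMR_LNKCHNG and only
 * schedule the same work from the interrupt handler. The work item below
 * either reads NSR_LINKST directly (internal "simple" PHY) or defers to the
 * generic mii_check_media() helper.
 */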

static void dm9000_show_carrier(struct board_info *db,
				unsigned carrier, unsigned nsr)
{
	int lpa;
	struct net_device *ndev = db->ndev;
	struct mii_if_info *mii = &db->mii;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier) {
		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
		dev_info(db->dev,
			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half", lpa);
	} else {
		dev_info(db->dev, "%s: link down\n", ndev->name);
	}
}

static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct board_info *db = container_of(dw, struct board_info, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}

/* dm9000_release_board
 *
 * release a board, and any mapped resources
 */

static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	/* unmap our resources */

	iounmap(db->io_addr);
	iounmap(db->io_data);

	/* release the resources */

	if (db->data_req)
		release_resource(db->data_req);
	kfree(db->data_req);

	if (db->addr_req)
		release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}

/*
 * Set DM9000 multicast address
 */
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4] = { 0, 0, 0, 0x8000 };	/* broadcast address */
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	/* the multicast address in Hash Table : 64 bits */
	netdev_for_each_mc_addr(ha, dev) {
		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
}

static void
dm9000_hash_table(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}
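
/* The multicast filter above is the usual 64-bit hash: the low six bits of
 * the little-endian CRC-32 of each multicast address select one bit in the
 * four 16-bit MAR words, and bit 63 is pre-set in hash_table[3] so broadcast
 * frames are always accepted.
 */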

static void
dm9000_mask_interrupts(struct board_info *db)
{
	iow(db, DM9000_IMR, IMR_PAR);
}

static void
dm9000_unmask_interrupts(struct board_info *db)
{
	iow(db, DM9000_IMR, db->imr_all);
}

/*
 * Initialize dm9000 board
 */
static void
dm9000_init_dm9000(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned int imr;
	unsigned int ncr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	dm9000_reset(db);
	dm9000_mask_interrupts(db);

	/* I/O mode */
	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */

	/* Checksum mode */
	if (dev->hw_features & NETIF_F_RXCSUM)
		iow(db, DM9000_RCSR,
			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);

	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
	iow(db, DM9000_GPR, 0);

	/* If we are dealing with DM9000B, some extra steps are required: a
	 * manual phy reset, and setting init params.
	 */
	if (db->type == TYPE_DM9000B) {
		dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
		dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
	}

	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

	/* if wol is needed, then always set NCR_WAKEEN otherwise we end
	 * up dumping the wake events if we disable this. There is already
	 * a wake-mask in DM9000_WCR */
	if (db->wake_supported)
		ncr |= NCR_WAKEEN;

	iow(db, DM9000_NCR, ncr);

	/* Program operating register */
	iow(db, DM9000_TCR, 0);		/* TX Polling clear */
	iow(db, DM9000_BPTR, 0x3f);	/* Less 3Kb, 200us */
	iow(db, DM9000_FCR, 0xff);	/* Flow Control */
	iow(db, DM9000_SMCR, 0);	/* Special Mode */
	/* clear TX status */
	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */

	/* Set address filter table */
	dm9000_hash_table_unlocked(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	/* Init Driver variable */
	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	netif_trans_update(dev);
}

/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* Save previous register address */
	spin_lock_irqsave(&db->lock, flags);
	db->in_timeout = 1;
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_init_dm9000(dev);
	dm9000_unmask_interrupts(db);
	/* We can accept TX packets again */
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);
	db->in_timeout = 0;
	spin_unlock_irqrestore(&db->lock, flags);
}

static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	struct board_info *dm = to_dm9000_board(dev);

	/* The DM9000 is not smart enough to leave fragmented packets alone. */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* Set TX length to DM9000 */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* Issue TX polling command */
	iow(dm, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
}
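
/* The transmit path relies on the chip's two on-board TX buffers:
 * dm9000_start_xmit() below copies a frame into TX SRAM via MWCMD, fires it
 * immediately if it is the only one outstanding, and otherwise just records
 * its length/checksum mode and stops the queue; dm9000_tx_done() kicks the
 * queued frame out and wakes the queue once NSR reports TX1END/TX2END.
 */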

/*
 * Hardware start transmission.
 * Send a packet to media from the upper layer.
 */
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	struct board_info *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	/* Move data to DM9000 TX RAM */
	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;
	/* TX control: First packet immediately send, second packet queue */
	if (db->tx_pkt_cnt == 1) {
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		/* Second packet */
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

/*
 * DM9000 interrupt handler
 * receive the packet to upper layer, free the transmitted packet
 */

static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
{
	int tx_status = ior(db, DM9000_NSR);	/* Got TX status */

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* One packet sent complete */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* Queue packet check & send */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}

struct dm9000_rxhdr {
	u8	RxPktReady;
	u8	RxStatus;
	__le16	RxLen;
} __packed;
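
/* Each received frame in RX SRAM is preceded by this four-byte header: a
 * "packet ready" marker, a status byte mirroring the RSR register, and a
 * little-endian length that includes the trailing 4-byte CRC - which is why
 * dm9000_rx() trims RxLen by four when building the skb.
 */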

/*
 * Received a packet and pass to upper layer
 */
static void
dm9000_rx(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	/* Check packet ready or not */
	do {
		ior(db, DM9000_MRCMDX);	/* Dummy read */

		/* Get most updated data */
		rxbyte = readb(db->io_data);

		/* Status check: this byte must be 0 or 1 */
		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		/* A packet ready now & Get status/length */
		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		/* Packet Status check */
		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX) {
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
		}

		/* rxhdr.RxStatus is identical to RSR register. */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from DM9000 */
		if (GoodPacket &&
		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);
			rdptr = (u8 *) skb_put(skb, RxLen - 4);

			/* Read received packet from RX SRAM */

			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			if (dev->features & NETIF_F_RXCSUM) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* need to dump the packet's data */

			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}

static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* A real interrupt coming */

	/* holders of db->lock must always block IRQs */
	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	dm9000_mask_interrupts(db);
	/* Got DM9000 interrupt status */
	int_status = ior(db, DM9000_ISR);	/* Got ISR */
	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* Received the coming packet */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* Transmit Interrupt check */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change request */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	dm9000_unmask_interrupts(db);
	/* Restore previous register address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		/* clear the latched wake status */
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST)
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/*
 * Open the interface.
 * The interface is opened whenever "ifconfig" activates it.
 */
static int
dm9000_open(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	/* If there is no IRQ type specified, tell the user that this is a
	 * problem
	 */
	if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
	mdelay(1); /* delay needed by DM9000B */

	/* Initialize DM9000 board */
	dm9000_init_dm9000(dev);

	if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
			dev->name, dev))
		return -EAGAIN;
	/* Now that we have an interrupt handler hooked up we can unmask
	 * our interrupts
	 */
	dm9000_unmask_interrupts(db);

	/* Init driver variable */
	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	/* Poll initial link status */
	schedule_delayed_work(&db->phy_poll, 1);

	return 0;
}

static void
dm9000_shutdown(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);

	/* RESET device */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
	dm9000_mask_interrupts(db);
	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
}

/*
 * Stop the interface.
 * The interface is stopped when it is brought down.
 */
static int
dm9000_stop(struct net_device *ndev)
{
	struct board_info *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	/* free interrupt */
	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}

static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};

static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
{
	struct dm9000_plat_data *pdata;
	struct device_node *np = dev->of_node;
	const void *mac_addr;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return ERR_PTR(-ENXIO);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	if (of_find_property(np, "davicom,ext-phy", NULL))
		pdata->flags |= DM9000_PLATF_EXT_PHY;
	if (of_find_property(np, "davicom,no-eeprom", NULL))
		pdata->flags |= DM9000_PLATF_NO_EEPROM;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));

	return pdata;
}
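
/* What probe() below expects from the platform: two memory resources (the
 * index port first, then the data port), a mandatory interrupt at index 0 and
 * an optional wake-up interrupt at index 1, plus an optional "vcc" regulator
 * and "reset-gpios" line. Configuration comes either from dm9000_plat_data or,
 * on OF systems, from the properties parsed above ("davicom,ext-phy",
 * "davicom,no-eeprom" and the standard MAC address property).
 */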

/*
 * Search DM9000 board, allocate space and register it
 */
static int
dm9000_probe(struct platform_device *pdev)
{
	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
	struct board_info *db;	/* Points to the board information structure */
	struct net_device *ndev;
	struct device *dev = &pdev->dev;
	const unsigned char *mac_src;
	int ret = 0;
	int iosize;
	int i;
	u32 id_val;
	int reset_gpios;
	enum of_gpio_flags flags;
	struct regulator *power;
	bool inv_mac_addr = false;

	power = devm_regulator_get(dev, "vcc");
	if (IS_ERR(power)) {
		if (PTR_ERR(power) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "no regulator provided\n");
	} else {
		ret = regulator_enable(power);
		if (ret != 0) {
			dev_err(dev,
				"Failed to enable power regulator: %d\n", ret);
			return ret;
		}
		dev_dbg(dev, "regulator enabled\n");
	}

	reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
					      &flags);
	if (gpio_is_valid(reset_gpios)) {
		ret = devm_gpio_request_one(dev, reset_gpios, flags,
					    "dm9000_reset");
		if (ret) {
			dev_err(dev, "failed to request reset gpio %d: %d\n",
				reset_gpios, ret);
			return -ENODEV;
		}

		/* According to manual PWRST# Low Period Min 1ms */
		msleep(2);
		gpio_set_value(reset_gpios, 1);
		/* Needs 3ms to read eeprom when PWRST is deasserted */
		msleep(4);
	}

	if (!pdata) {
		pdata = dm9000_parse_dt(&pdev->dev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "dm9000_probe()\n");

	/* setup board info structure */
	db = netdev_priv(ndev);

	db->dev = &pdev->dev;
	db->ndev = ndev;

	spin_lock_init(&db->lock);
	mutex_init(&db->addr_lock);

	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);

	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (!db->addr_res || !db->data_res) {
		dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
			db->addr_res, db->data_res);
		ret = -ENOENT;
		goto out;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		dev_err(db->dev, "interrupt resource unavailable: %d\n",
			ndev->irq);
		ret = ndev->irq;
		goto out;
	}

	db->irq_wake = platform_get_irq(pdev, 1);
	if (db->irq_wake >= 0) {
		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);

		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
				  IRQF_SHARED, dev_name(db->dev), ndev);
		if (ret) {
			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
		} else {

			/* test to see if irq is really wakeup capable */
			ret = irq_set_irq_wake(db->irq_wake, 1);
			if (ret) {
				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
					db->irq_wake, ret);
				ret = 0;
			} else {
				irq_set_irq_wake(db->irq_wake, 0);
				db->wake_supported = 1;
			}
		}
	}

	iosize = resource_size(db->addr_res);
	db->addr_req = request_mem_region(db->addr_res->start, iosize,
					  pdev->name);

	if (db->addr_req == NULL) {
		dev_err(db->dev, "cannot claim address reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_addr = ioremap(db->addr_res->start, iosize);

	if (db->io_addr == NULL) {
		dev_err(db->dev, "failed to ioremap address reg\n");
		ret = -EINVAL;
		goto out;
	}

	iosize = resource_size(db->data_res);
	db->data_req = request_mem_region(db->data_res->start, iosize,
					  pdev->name);

	if (db->data_req == NULL) {
		dev_err(db->dev, "cannot claim data reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_data = ioremap(db->data_res->start, iosize);

	if (db->io_data == NULL) {
		dev_err(db->dev, "failed to ioremap data reg\n");
		ret = -EINVAL;
		goto out;
	}

	/* fill in parameters for net-dev structure */
	ndev->base_addr = (unsigned long)db->io_addr;

	/* ensure at least we have a default set of IO routines */
	dm9000_set_io(db, iosize);

	/* check to see if anything is being over-ridden */
	if (pdata != NULL) {
		/* check to see if the driver wants to over-ride the
		 * default IO width */

		if (pdata->flags & DM9000_PLATF_8BITONLY)
			dm9000_set_io(db, 1);

		if (pdata->flags & DM9000_PLATF_16BITONLY)
			dm9000_set_io(db, 2);

		if (pdata->flags & DM9000_PLATF_32BITONLY)
			dm9000_set_io(db, 4);

		/* check to see if there are any IO routine
		 * over-rides */

		if (pdata->inblk != NULL)
			db->inblk = pdata->inblk;

		if (pdata->outblk != NULL)
			db->outblk = pdata->outblk;

		if (pdata->dumpblk != NULL)
			db->dumpblk = pdata->dumpblk;

		db->flags = pdata->flags;
	}

#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
	db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif

	dm9000_reset(db);
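
	/* The rest of probe(): verify the chip by reading VIDL/VIDH/PIDL/PIDH
	 * into a 32-bit ID and comparing against DM9000_ID (retried a few
	 * times, as the first read is sometimes bogus), pick the A/B/E
	 * variant from CHIPR, and then choose a MAC address in order of
	 * preference: EEPROM, platform data, the chip's own PAR registers,
	 * and finally a random address.
	 */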
	/* try multiple times, DM9000 sometimes gets the read wrong */
	for (i = 0; i < 8; i++) {
		id_val  = ior(db, DM9000_VIDL);
		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
		id_val |= (u32)ior(db, DM9000_PIDH) << 24;

		if (id_val == DM9000_ID)
			break;
		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
	}

	if (id_val != DM9000_ID) {
		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
		ret = -ENODEV;
		goto out;
	}

	/* Identify what type of DM9000 we are working on */

	id_val = ior(db, DM9000_CHIPR);
	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);

	switch (id_val) {
	case CHIPR_DM9000A:
		db->type = TYPE_DM9000A;
		break;
	case CHIPR_DM9000B:
		db->type = TYPE_DM9000B;
		break;
	default:
		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
		db->type = TYPE_DM9000E;
	}

	/* dm9000a/b are capable of hardware checksum offload */
	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		ndev->features |= ndev->hw_features;
	}

	/* from this point we assume that we have found a DM9000 */

	ndev->netdev_ops	= &dm9000_netdev_ops;
	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
	ndev->ethtool_ops	= &dm9000_ethtool_ops;

	db->msg_enable       = NETIF_MSG_LINK;
	db->mii.phy_id_mask  = 0x1f;
	db->mii.reg_num_mask = 0x1f;
	db->mii.force_media  = 0;
	db->mii.full_duplex  = 0;
	db->mii.dev	     = ndev;
	db->mii.mdio_read    = dm9000_phy_read;
	db->mii.mdio_write   = dm9000_phy_write;

	mac_src = "eeprom";

	/* try reading the node address from the attached EEPROM */
	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);

	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
		mac_src = "platform data";
		memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* try reading from mac */

		mac_src = "chip";
		for (i = 0; i < 6; i++)
			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		inv_mac_addr = true;
		eth_hw_addr_random(ndev);
		mac_src = "random";
	}


	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		if (inv_mac_addr)
			dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
				 ndev->name);
		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
		       ndev->name, dm9000_type_to_char(db->type),
		       db->io_addr, db->io_data, ndev->irq,
		       ndev->dev_addr, mac_src);
	}
	return 0;

out:
	dev_err(db->dev, "not found (%d).\n", ret);

	dm9000_release_board(pdev, db);
	free_netdev(ndev);

	return ret;
}

static int
dm9000_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct board_info *db;

	if (ndev) {
		db = netdev_priv(ndev);
		db->in_suspend = 1;

		if (!netif_running(ndev))
			return 0;

		netif_device_detach(ndev);

		/* only shutdown if not using WoL */
		if (!db->wake_state)
			dm9000_shutdown(ndev);
	}
	return 0;
}

static int
dm9000_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct board_info *db = netdev_priv(ndev);

	if (ndev) {
		if (netif_running(ndev)) {
			/* reset if we were not in wake mode to ensure if
			 * the device was powered off it is in a known state */
			if (!db->wake_state) {
				dm9000_init_dm9000(ndev);
				dm9000_unmask_interrupts(db);
			}

			netif_device_attach(ndev);
		}

		db->in_suspend = 0;
	}
	return 0;
}

static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};

static int
dm9000_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	dm9000_release_board(pdev, netdev_priv(ndev));
	free_netdev(ndev);		/* free device structure */

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id dm9000_of_matches[] = {
	{ .compatible = "davicom,dm9000", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dm9000_of_matches);
#endif

static struct platform_driver dm9000_driver = {
	.driver	= {
		.name    = "dm9000",
		.pm	 = &dm9000_drv_pm_ops,
		.of_match_table = of_match_ptr(dm9000_of_matches),
	},
	.probe   = dm9000_probe,
	.remove  = dm9000_drv_remove,
};

module_platform_driver(dm9000_driver);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");