/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 * Copyright (C) 1997 Sten Wang
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 1997-1998 DAVICOM Semiconductor, Inc. All Rights Reserved.
 *
 * Additional updates, Copyright:
 *	Ben Dooks <ben@simtec.co.uk>
 *	Sascha Hauer <s.hauer@pengutronix.de>
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

/* Board/System/Debug information/definition ---------------- */

#define DM9000_PHY		0x40	/* PHY address 0x01 */

#define CARDNAME	"dm9000"
#define DRV_VERSION	"1.31"

/*
 * Transmit timeout, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/*
 * Debug message level
 */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");

/* DM9000 register address locking.
 *
 * The DM9000 uses an address register to control where data written
 * to the data register goes. This means that the address register
 * must be preserved over interrupts or similar calls.
 *
 * During interrupt and other critical calls, a spinlock is used to
 * protect the system, but the calls themselves save the address
 * in the address register in case they are interrupting another
 * access to the device.
 *
 * For general accesses a lock is provided so that calls which are
 * allowed to sleep are serialised so that the address register does
 * not need to be saved. This lock also serves to serialise access
 * to the EEPROM and PHY access registers which are shared between
 * these two devices.
 */

/* The driver supports the original DM9000E, and now the two newer
 * devices, DM9000A and DM9000B.
 */
enum dm9000_type {
	TYPE_DM9000E,	/* original DM9000 */
	TYPE_DM9000A,
	TYPE_DM9000B
};

/* Structure/enum declaration ------------------------------- */
typedef struct board_info {

	void __iomem	*io_addr;	/* Register I/O base address */
	void __iomem	*io_data;	/* Data I/O address */
	u16		irq;		/* IRQ */

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;	/* 0:word, 2:byte */
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_timeout:1;
	unsigned int	in_suspend:1;
	unsigned int	wake_supported:1;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;		/* parent device */

	struct resource	*addr_res;	/* resources found */
	struct resource	*data_res;
	struct resource	*addr_req;	/* resources requested */
	struct resource	*data_req;
	struct resource	*irq_res;

	int		irq_wake;

	struct mutex	addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;
	struct net_device *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		ip_summed;
} board_info_t;

/* debug code */

#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < debug) {				\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)

static inline board_info_t *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

/* DM9000 network board routine ---------------------------- */

/*
 * Read a byte from I/O port
 */
static u8
ior(board_info_t *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

/*
 * Write a byte to I/O port
 */

static void
iow(board_info_t *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}

static void
dm9000_reset(board_info_t *db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
	 * The essential point is that we have to do a double reset, and the
	 * instruction is to set LBK into MAC internal loopback mode.
	 */
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100); /* Application note says at least 20 us */
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to first reset\n");

	iow(db, DM9000_NCR, 0);
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to second reset\n");
}

/* routines for sending block to chip */

static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count+3) >> 2);
}

/* input block from chip to memory */

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}

static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count+3) >> 2);
}

/* dump block from chip to null */

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	for (i = 0; i < count; i++)
		tmp = readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		tmp = readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		tmp = readl(reg);
}
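/* Note on dm9000_msleep() below: it falls back to a busy-wait mdelay()
 * when in_suspend or in_timeout is set. in_timeout is set on the TX
 * watchdog path, which runs under the register spinlock with interrupts
 * disabled, so a sleeping call is not allowed there; in_suspend similarly
 * avoids msleep() while the system is suspending.
 */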
/*
 * Sleep, either by using msleep() or, if we are suspending or in a
 * timeout, by busy-waiting with mdelay().
 */
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
	if (db->in_suspend || db->in_timeout)
		mdelay(ms);
	else
		msleep(ms);
}

/* Read a word from phyxcer */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Issue phyxcer read command */
	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait read complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */

	/* The read data keeps on REG_0D & REG_0E */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}

/* Write a word to phyxcer */
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	if (!db->in_timeout)
		mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Fill the written data into REG_0D & REG_0E */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	/* Issue phyxcer write command */
	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait write complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */

	/* restore the previous address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	if (!db->in_timeout)
		mutex_unlock(&db->addr_lock);
}

/* dm9000_set_io
 *
 * select the specified set of io routines to use with the
 * device
 */

static void dm9000_set_io(struct board_info *db, int byte_width)
{
	/* use the size of the data resource to work out what IO
	 * routines we want to use
	 */

	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;

	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		/* fall through */
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}
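/* Link management: the original DM9000E has no link-change interrupt, so
 * dm9000_schedule_poll() re-arms the phy_poll delayed work every two seconds
 * for that chip only. The DM9000A/B instead raise ISR_LNKCHNG, which the
 * interrupt handler turns into a one-shot schedule of the same work item.
 */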
static void dm9000_schedule_poll(board_info_t *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

static unsigned int
dm9000_read_locked(board_info_t *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}

static int dm9000_wait_eeprom(board_info_t *db)
{
	unsigned int status;
	int timeout = 8;	/* wait max 8msec */

	/* The DM9000 data sheets say we should be able to
	 * poll the ERRE bit in EPCR to wait for the EEPROM
	 * operation. From testing several chips, this bit
	 * does not seem to work.
	 *
	 * We attempt to use the bit, but fall back to the
	 * timeout (which is why we do not return an error
	 * on expiry) to say that the EEPROM operation has
	 * completed.
	 */

	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting EEPROM\n");
			break;
		}
	}

	return 0;
}

/*
 * Read a word data from EEPROM
 */
static void
dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	/* delay for at-least 150uS */
	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

/*
 * Write a word data to SROM
 */
static void
dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);	/* wait at least 150uS to clear */

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

/* ethtool ops */

static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	board_info_t *dm = to_dm9000_board(dev);

	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
		sizeof(info->bus_info));
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}
static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	board_info_t *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	mii_ethtool_gset(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	return mii_ethtool_sset(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	return mii_nway_restart(&dm->mii);
}

static int dm9000_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	board_info_t *dm = to_dm9000_board(dev);
	netdev_features_t changed = dev->features ^ features;
	unsigned long flags;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
	spin_unlock_irqrestore(&dm->lock, flags);

	return 0;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}

#define DM_EEPROM_MAGIC		(0x444D394B)

static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	/* EEPROM access is aligned to two bytes */

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	/* EEPROM access is aligned to two bytes */

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	while (len > 0) {
		if (len & 1 || offset & 1) {
			/* unaligned access: read-modify-write one byte of
			 * the containing EEPROM word
			 */
			int which = offset & 1;
			u8 tmp[2];

			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}

static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	/* note, we could probably support wake-phy too */
	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}

static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* change in wol state, update IRQ state */

		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}

static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_settings		= dm9000_get_settings,
	.set_settings		= dm9000_set_settings,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
};

static void dm9000_show_carrier(board_info_t *db,
				unsigned carrier, unsigned nsr)
{
	int lpa;
	struct net_device *ndev = db->ndev;
	struct mii_if_info *mii = &db->mii;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier) {
		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
		dev_info(db->dev,
			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half", lpa);
	} else {
		dev_info(db->dev, "%s: link down\n", ndev->name);
	}
}
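/* Periodic link poll. Boards flagged DM9000_PLATF_SIMPLE_PHY (and not using
 * an external PHY) read the link state straight from the NSR register;
 * everyone else goes through the generic MII helpers. The work re-arms
 * itself via dm9000_schedule_poll() while the interface is running.
 */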
static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	board_info_t *db = container_of(dw, board_info_t, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}

/* dm9000_release_board
 *
 * release a board, and any mapped resources
 */

static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	/* unmap our resources */

	iounmap(db->io_addr);
	iounmap(db->io_data);

	/* release the resources */

	release_resource(db->data_req);
	kfree(db->data_req);

	release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}

/*
 * Set DM9000 multicast address
 */
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4] = { 0, 0, 0, 0x8000 };	/* broadcast address */
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	/* the multicast addresses in Hash Table: 64 bits */
	netdev_for_each_mc_addr(ha, dev) {
		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
}

static void
dm9000_hash_table(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}

static void
dm9000_mask_interrupts(board_info_t *db)
{
	iow(db, DM9000_IMR, IMR_PAR);
}

static void
dm9000_unmask_interrupts(board_info_t *db)
{
	iow(db, DM9000_IMR, db->imr_all);
}

/*
 * Initialize dm9000 board
 */
static void
dm9000_init_dm9000(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned int imr;
	unsigned int ncr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	dm9000_reset(db);
	dm9000_mask_interrupts(db);

	/* I/O mode */
	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */

	/* Checksum mode */
	if (dev->hw_features & NETIF_F_RXCSUM)
		iow(db, DM9000_RCSR,
			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);

	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
	iow(db, DM9000_GPR, 0);

	/* If we are dealing with DM9000B, some extra steps are required: a
	 * manual phy reset, and setting init params.
	 */
	if (db->type == TYPE_DM9000B) {
		dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
		dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
	}

	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

	/* if wol is needed, then always set NCR_WAKEEN otherwise we end
	 * up dumping the wake events if we disable this. There is already
	 * a wake-mask in DM9000_WCR */
	if (db->wake_supported)
		ncr |= NCR_WAKEEN;

	iow(db, DM9000_NCR, ncr);

	/* Program operating register */
	iow(db, DM9000_TCR, 0);		/* TX Polling clear */
	iow(db, DM9000_BPTR, 0x3f);	/* Less 3Kb, 200us */
	iow(db, DM9000_FCR, 0xff);	/* Flow Control */
	iow(db, DM9000_SMCR, 0);	/* Special Mode */
	/* clear TX status */
	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS);	/* Clear interrupt status */

	/* Set address filter table */
	dm9000_hash_table_unlocked(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	/* Init Driver variable */
	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	dev->trans_start = jiffies;
}

/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* Save previous register address */
	spin_lock_irqsave(&db->lock, flags);
	db->in_timeout = 1;
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_init_dm9000(dev);
	dm9000_unmask_interrupts(db);
	/* We can accept TX packets again */
	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_wake_queue(dev);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);
	db->in_timeout = 0;
	spin_unlock_irqrestore(&db->lock, flags);
}

static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	board_info_t *dm = to_dm9000_board(dev);

	/* The DM9000 is not smart enough to leave fragmented packets alone. */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* Set TX length to DM9000 */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* Issue TX polling command */
	iow(dm, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
}

/*
 * Hardware start transmission.
 * Send a packet to media from the upper layer.
 */
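/* Note: the chip has internal TX SRAM for two outstanding packets, tracked by
 * tx_pkt_cnt. The first packet is transmitted immediately; a second one is
 * copied into TX SRAM and queued (queue_pkt_len/queue_ip_summed) until the
 * TX-complete interrupt sends it from dm9000_tx_done(), at which point the
 * queue is woken again.
 */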
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	board_info_t *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	/* Move data to DM9000 TX RAM */
	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;
	/* TX control: First packet immediately send, second packet queue */
	if (db->tx_pkt_cnt == 1) {
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		/* Second packet */
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

/*
 * DM9000 interrupt handler:
 * pass received packets to the upper layer, free transmitted packets
 */

static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
{
	int tx_status = ior(db, DM9000_NSR);	/* Got TX status */

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* One packet sent complete */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* Queue packet check & send */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}

struct dm9000_rxhdr {
	u8	RxPktReady;
	u8	RxStatus;
	__le16	RxLen;
} __packed;

/*
 * Receive a packet and pass it to the upper layer
 */
static void
dm9000_rx(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	/* Check packet ready or not */
	do {
		ior(db, DM9000_MRCMDX);	/* Dummy read */

		/* Get most updated data */
		rxbyte = readb(db->io_data);

		/* Status check: this byte must be 0 or 1 */
		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		/* A packet ready now & Get status/length */
		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		/* Packet Status check */
		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX) {
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
		}

		/* rxhdr.RxStatus is identical to RSR register. */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from DM9000 */
		if (GoodPacket &&
		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);
			rdptr = (u8 *) skb_put(skb, RxLen - 4);

			/* Read received packet from RX SRAM */

			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			if (dev->features & NETIF_F_RXCSUM) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* need to dump the packet's data */

			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}

static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* A real interrupt coming */

	/* holders of db->lock must always block IRQs */
	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	dm9000_mask_interrupts(db);
	/* Got DM9000 interrupt status */
	int_status = ior(db, DM9000_ISR);	/* Got ISR */
	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* Received the coming packet */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* Transmit Interrupt check */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change request */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	dm9000_unmask_interrupts(db);
	/* Restore previous register address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}
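/* Wake-on-LAN interrupt. This is only hooked up when the platform provides a
 * second, dedicated wake IRQ (see the platform_get_irq(pdev, 1) handling in
 * dm9000_probe()); it acknowledges NSR_WAKEST and reports the wake reason.
 */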
static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		/* clear the wake status so it is not signalled again */
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST)
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WCR=0x%02x\n", nsr, wcr);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/*
 * Open the interface.
 * The interface is opened whenever "ifconfig" activates it.
 */
static int
dm9000_open(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	/* If there is no IRQ type specified, default to something that
	 * may work, and tell the user that this is a problem
	 */

	if (irqflags == IRQF_TRIGGER_NONE)
		irqflags = irq_get_trigger_type(dev->irq);

	if (irqflags == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	irqflags |= IRQF_SHARED;

	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
	mdelay(1);		/* delay needed by DM9000B */

	/* Initialize DM9000 board */
	dm9000_init_dm9000(dev);

	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
		return -EAGAIN;
	/* Now that we have an interrupt handler hooked up we can unmask
	 * our interrupts
	 */
	dm9000_unmask_interrupts(db);

	/* Init driver variable */
	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	/* Poll initial link status */
	schedule_delayed_work(&db->phy_poll, 1);

	return 0;
}

static void
dm9000_shutdown(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);

	/* RESET device */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
	dm9000_mask_interrupts(db);
	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
}

/*
 * Stop the interface.
 * The interface is stopped when it is brought down.
 */
static int
dm9000_stop(struct net_device *ndev)
{
	board_info_t *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	/* free interrupt */
	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}

static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};

static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
{
	struct dm9000_plat_data *pdata;
	struct device_node *np = dev->of_node;
	const void *mac_addr;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return NULL;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	if (of_find_property(np, "davicom,ext-phy", NULL))
		pdata->flags |= DM9000_PLATF_EXT_PHY;
	if (of_find_property(np, "davicom,no-eeprom", NULL))
		pdata->flags |= DM9000_PLATF_NO_EEPROM;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));

	return pdata;
}

/*
 * Search DM9000 board, allocate space and register it
 */
static int
dm9000_probe(struct platform_device *pdev)
{
	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
	struct board_info *db;	/* Points to the board information structure */
	struct net_device *ndev;
	const unsigned char *mac_src;
	int ret = 0;
	int iosize;
	int i;
	u32 id_val;

	if (!pdata) {
		pdata = dm9000_parse_dt(&pdev->dev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "dm9000_probe()\n");

	/* setup board info structure */
	db = netdev_priv(ndev);

	db->dev = &pdev->dev;
	db->ndev = ndev;

	spin_lock_init(&db->lock);
	mutex_init(&db->addr_lock);

	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);

	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	db->irq_res  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (db->addr_res == NULL || db->data_res == NULL ||
	    db->irq_res == NULL) {
		dev_err(db->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto out;
	}

	db->irq_wake = platform_get_irq(pdev, 1);
	if (db->irq_wake >= 0) {
		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);

		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
				  IRQF_SHARED, dev_name(db->dev), ndev);
		if (ret) {
			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
		} else {

			/* test to see if irq is really wakeup capable */
			ret = irq_set_irq_wake(db->irq_wake, 1);
			if (ret) {
				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
					db->irq_wake, ret);
				ret = 0;
			} else {
				irq_set_irq_wake(db->irq_wake, 0);
				db->wake_supported = 1;
			}
		}
	}

	iosize = resource_size(db->addr_res);
	db->addr_req = request_mem_region(db->addr_res->start, iosize,
					  pdev->name);

	if (db->addr_req == NULL) {
		dev_err(db->dev, "cannot claim address reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_addr = ioremap(db->addr_res->start, iosize);

	if (db->io_addr == NULL) {
		dev_err(db->dev, "failed to ioremap address reg\n");
		ret = -EINVAL;
		goto out;
	}

	iosize = resource_size(db->data_res);
	db->data_req = request_mem_region(db->data_res->start, iosize,
					  pdev->name);

	if (db->data_req == NULL) {
		dev_err(db->dev, "cannot claim data reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_data = ioremap(db->data_res->start, iosize);

	if (db->io_data == NULL) {
		dev_err(db->dev, "failed to ioremap data reg\n");
		ret = -EINVAL;
		goto out;
	}

	/* fill in parameters for net-dev structure */
	ndev->base_addr = (unsigned long)db->io_addr;
	ndev->irq = db->irq_res->start;

	/* ensure at least we have a default set of IO routines */
	dm9000_set_io(db, iosize);

	/* check to see if anything is being over-ridden */
	if (pdata != NULL) {
		/* check to see if the driver wants to over-ride the
		 * default IO width
		 */

		if (pdata->flags & DM9000_PLATF_8BITONLY)
			dm9000_set_io(db, 1);

		if (pdata->flags & DM9000_PLATF_16BITONLY)
			dm9000_set_io(db, 2);

		if (pdata->flags & DM9000_PLATF_32BITONLY)
			dm9000_set_io(db, 4);

		/* check to see if there are any IO routine
		 * over-rides
		 */

		if (pdata->inblk != NULL)
			db->inblk = pdata->inblk;

		if (pdata->outblk != NULL)
			db->outblk = pdata->outblk;

		if (pdata->dumpblk != NULL)
			db->dumpblk = pdata->dumpblk;

		db->flags = pdata->flags;
	}

#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
	db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif

	dm9000_reset(db);

	/* try multiple times, DM9000 sometimes gets the read wrong */
	for (i = 0; i < 8; i++) {
		id_val  = ior(db, DM9000_VIDL);
		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
		id_val |= (u32)ior(db, DM9000_PIDH) << 24;

		if (id_val == DM9000_ID)
			break;
		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
	}

	if (id_val != DM9000_ID) {
		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
		ret = -ENODEV;
		goto out;
	}

	/* Identify what type of DM9000 we are working on */

	id_val = ior(db, DM9000_CHIPR);
	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);

	switch (id_val) {
	case CHIPR_DM9000A:
		db->type = TYPE_DM9000A;
		break;
	case CHIPR_DM9000B:
		db->type = TYPE_DM9000B;
		break;
	default:
		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
		db->type = TYPE_DM9000E;
	}

	/* dm9000a/b are capable of hardware checksum offload */
	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		ndev->features |= ndev->hw_features;
	}
	/* from this point we assume that we have found a DM9000 */

	/* driver system function */
	ether_setup(ndev);

	ndev->netdev_ops	= &dm9000_netdev_ops;
	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
	ndev->ethtool_ops	= &dm9000_ethtool_ops;

	db->msg_enable       = NETIF_MSG_LINK;
	db->mii.phy_id_mask  = 0x1f;
	db->mii.reg_num_mask = 0x1f;
	db->mii.force_media  = 0;
	db->mii.full_duplex  = 0;
	db->mii.dev	     = ndev;
	db->mii.mdio_read    = dm9000_phy_read;
	db->mii.mdio_write   = dm9000_phy_write;

	mac_src = "eeprom";

	/* try reading the node address from the attached EEPROM */
	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr + i);

	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
		mac_src = "platform data";
		memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* try reading from mac */

		mac_src = "chip";
		for (i = 0; i < 6; i++)
			ndev->dev_addr[i] = ior(db, i + DM9000_PAR);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

		eth_hw_addr_random(ndev);
		mac_src = "random";
	}

	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0)
		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
		       ndev->name, dm9000_type_to_char(db->type),
		       db->io_addr, db->io_data, ndev->irq,
		       ndev->dev_addr, mac_src);
	return 0;

out:
	dev_err(db->dev, "not found (%d).\n", ret);

	dm9000_release_board(pdev, db);
	free_netdev(ndev);

	return ret;
}

static int
dm9000_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db;

	if (ndev) {
		db = netdev_priv(ndev);
		db->in_suspend = 1;

		if (!netif_running(ndev))
			return 0;

		netif_device_detach(ndev);

		/* only shutdown if not using WoL */
		if (!db->wake_state)
			dm9000_shutdown(ndev);
	}
	return 0;
}

static int
dm9000_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db;

	if (ndev) {
		db = netdev_priv(ndev);

		if (netif_running(ndev)) {
			/* reset if we were not in wake mode to ensure if
			 * the device was powered off it is in a known state
			 */
			if (!db->wake_state) {
				dm9000_init_dm9000(ndev);
				dm9000_unmask_interrupts(db);
			}

			netif_device_attach(ndev);
		}

		db->in_suspend = 0;
	}
	return 0;
}

static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};

static int
dm9000_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	dm9000_release_board(pdev, netdev_priv(ndev));
	free_netdev(ndev);		/* free device structure */

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id dm9000_of_matches[] = {
	{ .compatible = "davicom,dm9000", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dm9000_of_matches);
#endif

static struct platform_driver dm9000_driver = {
	.driver	= {
		.name    = "dm9000",
		.owner	 = THIS_MODULE,
		.pm	 = &dm9000_drv_pm_ops,
		.of_match_table = of_match_ptr(dm9000_of_matches),
	},
	.probe   = dm9000_probe,
	.remove  = dm9000_drv_remove,
};

module_platform_driver(dm9000_driver);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");