1 /* 2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast 3 ethernet driver for Linux. 4 Copyright (C) 1997 Sten Wang 5 6 This program is free software; you can redistribute it and/or 7 modify it under the terms of the GNU General Public License 8 as published by the Free Software Foundation; either version 2 9 of the License, or (at your option) any later version. 10 11 This program is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 GNU General Public License for more details. 15 16 DAVICOM Web-Site: www.davicom.com.tw 17 18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw 19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu> 20 21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. 22 23 Marcelo Tosatti <marcelo@conectiva.com.br> : 24 Made it compile in 2.3 (device to net_device) 25 26 Alan Cox <alan@lxorguk.ukuu.org.uk> : 27 Cleaned up for kernel merge. 28 Removed the back compatibility support 29 Reformatted, fixing spelling etc as I went 30 Removed IRQ 0-15 assumption 31 32 Jeff Garzik <jgarzik@pobox.com> : 33 Updated to use new PCI driver API. 34 Resource usage cleanups. 35 Report driver version to user. 36 37 Tobias Ringstrom <tori@unhappy.mine.nu> : 38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik, 39 Andrew Morton and Frank Davis for the SMP safety fixes. 40 41 Vojtech Pavlik <vojtech@suse.cz> : 42 Cleaned up pointer arithmetics. 43 Fixed a lot of 64bit issues. 44 Cleaned up printk()s a bit. 45 Fixed some obvious big endian problems. 46 47 Tobias Ringstrom <tori@unhappy.mine.nu> : 48 Use time_after for jiffies calculation. Added ethtool 49 support. Updated PCI resource allocation. Do not 50 forget to unmap PCI mapped skbs. 51 52 Alan Cox <alan@lxorguk.ukuu.org.uk> 53 Added new PCI identifiers provided by Clear Zhang at ALi 54 for their 1563 ethernet device. 
55 56 TODO 57 58 Check on 64 bit boxes. 59 Check and fix on big endian boxes. 60 61 Test and make sure PCI latency is now correct for all cases. 62 */ 63 64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 65 66 #define DRV_NAME "dmfe" 67 #define DRV_VERSION "1.36.4" 68 #define DRV_RELDATE "2002-01-17" 69 70 #include <linux/module.h> 71 #include <linux/kernel.h> 72 #include <linux/string.h> 73 #include <linux/timer.h> 74 #include <linux/ptrace.h> 75 #include <linux/errno.h> 76 #include <linux/ioport.h> 77 #include <linux/interrupt.h> 78 #include <linux/pci.h> 79 #include <linux/dma-mapping.h> 80 #include <linux/init.h> 81 #include <linux/netdevice.h> 82 #include <linux/etherdevice.h> 83 #include <linux/ethtool.h> 84 #include <linux/skbuff.h> 85 #include <linux/delay.h> 86 #include <linux/spinlock.h> 87 #include <linux/crc32.h> 88 #include <linux/bitops.h> 89 90 #include <asm/processor.h> 91 #include <asm/io.h> 92 #include <asm/dma.h> 93 #include <asm/uaccess.h> 94 #include <asm/irq.h> 95 96 #ifdef CONFIG_TULIP_DM910X 97 #include <linux/of.h> 98 #endif 99 100 101 /* Board/System/Debug information/definition ---------------- */ 102 #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */ 103 #define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */ 104 #define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */ 105 #define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */ 106 107 #define DM9102_IO_SIZE 0x80 108 #define DM9102A_IO_SIZE 0x100 109 #define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */ 110 #define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */ 111 #define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */ 112 #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */ 113 #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */ 114 #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT) 115 #define TX_BUF_ALLOC 0x600 116 #define RX_ALLOC_SIZE 0x620 117 #define DM910X_RESET 1 118 #define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */ 119 #define 
CR6_DEFAULT 0x00080000 /* HD */ 120 #define CR7_DEFAULT 0x180c1 121 #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */ 122 #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */ 123 #define MAX_PACKET_SIZE 1514 124 #define DMFE_MAX_MULTICAST 14 125 #define RX_COPY_SIZE 100 126 #define MAX_CHECK_PACKET 0x8000 127 #define DM9801_NOISE_FLOOR 8 128 #define DM9802_NOISE_FLOOR 5 129 130 #define DMFE_WOL_LINKCHANGE 0x20000000 131 #define DMFE_WOL_SAMPLEPACKET 0x10000000 132 #define DMFE_WOL_MAGICPACKET 0x08000000 133 134 135 #define DMFE_10MHF 0 136 #define DMFE_100MHF 1 137 #define DMFE_10MFD 4 138 #define DMFE_100MFD 5 139 #define DMFE_AUTO 8 140 #define DMFE_1M_HPNA 0x10 141 142 #define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */ 143 #define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */ 144 #define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */ 145 #define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */ 146 #define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */ 147 #define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */ 148 149 #define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */ 150 #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ 151 #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ 152 153 #define dw32(reg, val) iowrite32(val, ioaddr + (reg)) 154 #define dw16(reg, val) iowrite16(val, ioaddr + (reg)) 155 #define dr32(reg) ioread32(ioaddr + (reg)) 156 #define dr16(reg) ioread16(ioaddr + (reg)) 157 #define dr8(reg) ioread8(ioaddr + (reg)) 158 159 #define DMFE_DBUG(dbug_now, msg, value) \ 160 do { \ 161 if (dmfe_debug || (dbug_now)) \ 162 pr_err("%s %lx\n", \ 163 (msg), (long) (value)); \ 164 } while (0) 165 166 #define SHOW_MEDIA_TYPE(mode) \ 167 pr_info("Change Speed to %sMhz %s duplex\n" , \ 168 (mode & 1) ? "100":"10", \ 169 (mode & 4) ? 
"full":"half"); 170 171 172 /* CR9 definition: SROM/MII */ 173 #define CR9_SROM_READ 0x4800 174 #define CR9_SRCS 0x1 175 #define CR9_SRCLK 0x2 176 #define CR9_CRDOUT 0x8 177 #define SROM_DATA_0 0x0 178 #define SROM_DATA_1 0x4 179 #define PHY_DATA_1 0x20000 180 #define PHY_DATA_0 0x00000 181 #define MDCLKH 0x10000 182 183 #define PHY_POWER_DOWN 0x800 184 185 #define SROM_V41_CODE 0x14 186 187 #define __CHK_IO_SIZE(pci_id, dev_rev) \ 188 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \ 189 DM9102A_IO_SIZE: DM9102_IO_SIZE) 190 191 #define CHK_IO_SIZE(pci_dev) \ 192 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \ 193 (pci_dev)->revision)) 194 195 /* Sten Check */ 196 #define DEVICE net_device 197 198 /* Structure/enum declaration ------------------------------- */ 199 struct tx_desc { 200 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ 201 char *tx_buf_ptr; /* Data for us */ 202 struct tx_desc *next_tx_desc; 203 } __attribute__(( aligned(32) )); 204 205 struct rx_desc { 206 __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ 207 struct sk_buff *rx_skb_ptr; /* Data for us */ 208 struct rx_desc *next_rx_desc; 209 } __attribute__(( aligned(32) )); 210 211 struct dmfe_board_info { 212 u32 chip_id; /* Chip vendor/Device ID */ 213 u8 chip_revision; /* Chip revision */ 214 struct net_device *next_dev; /* next device */ 215 struct pci_dev *pdev; /* PCI device */ 216 spinlock_t lock; 217 218 void __iomem *ioaddr; /* I/O base address */ 219 u32 cr0_data; 220 u32 cr5_data; 221 u32 cr6_data; 222 u32 cr7_data; 223 u32 cr15_data; 224 225 /* pointer for memory physical address */ 226 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */ 227 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */ 228 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */ 229 dma_addr_t first_tx_desc_dma; 230 dma_addr_t first_rx_desc_dma; 231 232 /* descriptor pointer */ 233 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */ 234 unsigned 
char *buf_pool_start; /* Tx buffer pool align dword */ 235 unsigned char *desc_pool_ptr; /* descriptor pool memory */ 236 struct tx_desc *first_tx_desc; 237 struct tx_desc *tx_insert_ptr; 238 struct tx_desc *tx_remove_ptr; 239 struct rx_desc *first_rx_desc; 240 struct rx_desc *rx_insert_ptr; 241 struct rx_desc *rx_ready_ptr; /* packet come pointer */ 242 unsigned long tx_packet_cnt; /* transmitted packet count */ 243 unsigned long tx_queue_cnt; /* wait to send packet count */ 244 unsigned long rx_avail_cnt; /* available rx descriptor count */ 245 unsigned long interval_rx_cnt; /* rx packet count a callback time */ 246 247 u16 HPNA_command; /* For HPNA register 16 */ 248 u16 HPNA_timer; /* For HPNA remote device check */ 249 u16 dbug_cnt; 250 u16 NIC_capability; /* NIC media capability */ 251 u16 PHY_reg4; /* Saved Phyxcer register 4 value */ 252 253 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */ 254 u8 chip_type; /* Keep DM9102A chip type */ 255 u8 media_mode; /* user specify media mode */ 256 u8 op_mode; /* real work media mode */ 257 u8 phy_addr; 258 u8 wait_reset; /* Hardware failed, need to reset */ 259 u8 dm910x_chk_mode; /* Operating mode check */ 260 u8 first_in_callback; /* Flag to record state */ 261 u8 wol_mode; /* user WOL settings */ 262 struct timer_list timer; 263 264 /* Driver defined statistic counter */ 265 unsigned long tx_fifo_underrun; 266 unsigned long tx_loss_carrier; 267 unsigned long tx_no_carrier; 268 unsigned long tx_late_collision; 269 unsigned long tx_excessive_collision; 270 unsigned long tx_jabber_timeout; 271 unsigned long reset_count; 272 unsigned long reset_cr8; 273 unsigned long reset_fatal; 274 unsigned long reset_TXtimeout; 275 276 /* NIC SROM data */ 277 unsigned char srom[128]; 278 }; 279 280 enum dmfe_offsets { 281 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20, 282 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48, 283 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70, 284 
DCR15 = 0x78 285 }; 286 287 enum dmfe_CR6_bits { 288 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80, 289 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000, 290 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000 291 }; 292 293 /* Global variable declaration ----------------------------- */ 294 static int __devinitdata printed_version; 295 static const char version[] __devinitconst = 296 "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")"; 297 298 static int dmfe_debug; 299 static unsigned char dmfe_media_mode = DMFE_AUTO; 300 static u32 dmfe_cr6_user_set; 301 302 /* For module input parameter */ 303 static int debug; 304 static u32 cr6set; 305 static unsigned char mode = 8; 306 static u8 chkmode = 1; 307 static u8 HPNA_mode; /* Default: Low Power/High Speed */ 308 static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */ 309 static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */ 310 static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */ 311 static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control 312 4: TX pause packet */ 313 314 315 /* function declaration ------------------------------------- */ 316 static int dmfe_open(struct DEVICE *); 317 static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *); 318 static int dmfe_stop(struct DEVICE *); 319 static void dmfe_set_filter_mode(struct DEVICE *); 320 static const struct ethtool_ops netdev_ethtool_ops; 321 static u16 read_srom_word(void __iomem *, int); 322 static irqreturn_t dmfe_interrupt(int , void *); 323 #ifdef CONFIG_NET_POLL_CONTROLLER 324 static void poll_dmfe (struct net_device *dev); 325 #endif 326 static void dmfe_descriptor_init(struct net_device *); 327 static void allocate_rx_buffer(struct net_device *); 328 static void update_cr6(u32, void __iomem *); 329 static void send_filter_frame(struct DEVICE *); 330 static void dm9132_id_table(struct DEVICE *); 331 static u16 phy_read(void __iomem *, u8, u8, u32); 332 
static void phy_write(void __iomem *, u8, u8, u16, u32);
static void phy_write_1bit(void __iomem *, u32);
static u16 phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);

/* DM910X network board routine ---------------------------- */

static const struct net_device_ops netdev_ops = {
	.ndo_open		= dmfe_open,
	.ndo_stop		= dmfe_stop,
	.ndo_start_xmit		= dmfe_start_xmit,
	.ndo_set_rx_mode	= dmfe_set_filter_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_dmfe,
#endif
};

/*
 *	Search DM910X board, allocate space and register it.
 *
 *	PCI probe callback: allocates the net_device, maps BAR 0,
 *	allocates the Tx/Rx descriptor and Tx buffer DMA pools, reads
 *	the station address from the SROM and registers the netdev.
 *	Returns 0 on success or a negative errno; on failure all
 *	acquired resources are released via the goto unwind chain below.
 */

static int __devinit dmfe_init_one (struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct dmfe_board_info *db;	/* board information structure */
	struct net_device *dev;
	u32 pci_pmr;
	int i, err;

	DMFE_DBUG(0, "dmfe_init_one()", 0);

	if (!printed_version++)
		pr_info("%s\n", version);

	/*
	 *	SPARC on-board DM910x chips should be handled by the main
	 *	tulip driver, except for early DM9100s.
	 */
#ifdef CONFIG_TULIP_DM910X
	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
	    ent->driver_data == PCI_DM9102_ID) {
		struct device_node *dp = pci_device_to_OF_node(pdev);

		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
			pr_info("skipping on-board DM910x (use tulip)\n");
			return -ENODEV;
		}
	}
#endif

	/* Init network device */
	dev = alloc_etherdev(sizeof(*db));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* NOTE(review): the DMA mask is set before pci_enable_device();
	 * some platforms expect the device to be enabled first — confirm
	 * ordering against the current PCI core expectations. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");
		err = -ENODEV;
		goto err_out_free;
	}

	/* Enable Master/IO access, Disable memory access */
	err = pci_enable_device(pdev);
	if (err)
		goto err_out_free;

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	/* BAR 0 must be large enough for this chip's register window
	 * (0x100 bytes for DM9132/DM9102A, 0x80 otherwise). */
	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
		pr_err("Allocated I/O size too small\n");
		err = -ENODEV;
		goto err_out_disable;
	}

#if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */

	/* Set Latency Timer 80h */
	/* FIXME: setting values > 32 breaks some SiS 559x stuff.
	   Need a PCI quirk.. */

	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

	if (pci_request_regions(pdev, DRV_NAME)) {
		pr_err("Failed to request PCI regions\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	/* Init system & device */
	db = netdev_priv(dev);

	/* Allocate Tx/Rx descriptor memory (+0x20 slack for alignment) */
	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
	if (!db->desc_pool_ptr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	/* One TX_BUF_ALLOC-sized bounce buffer per Tx descriptor */
	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
	if (!db->buf_pool_ptr) {
		err = -ENOMEM;
		goto err_out_free_desc;
	}

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	db->chip_id = ent->driver_data;
	/* IO type range. */
	db->ioaddr = pci_iomap(pdev, 0, 0);
	if (!db->ioaddr) {
		err = -ENOMEM;
		goto err_out_free_buf;
	}

	db->chip_revision = pdev->revision;
	db->wol_mode = 0;

	db->pdev = pdev;

	pci_set_drvdata(pdev, dev);
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	netif_carrier_off(dev);
	spin_lock_init(&db->lock);

	/* Config word 0x50 distinguishes the DM9102A E3 stepping */
	pci_read_config_dword(pdev, 0x50, &pci_pmr);
	pci_pmr &= 0x70000;
	if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
		db->chip_type = 1;	/* DM9102A E3 */
	else
		db->chip_type = 0;

	/* read 64 word srom data */
	for (i = 0; i < 64; i++) {
		((__le16 *) db->srom)[i] =
			cpu_to_le16(read_srom_word(db->ioaddr, i));
	}

	/* Set Node address: MAC bytes live at SROM offset 20..25 */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = db->srom[20 + i];

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap;

	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
		 ent->driver_data >> 16,
		 pci_name(pdev), dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	return 0;

	/* Unwind in reverse acquisition order */
err_out_unmap:
	pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return err;
}


/*
 *	PCI remove callback: tears down everything dmfe_init_one() set up.
 */
static void __devexit dmfe_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	/* netdev_priv() is pure pointer arithmetic, so computing db before
	 * the NULL check below is harmless; db is only used when dev != NULL */
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_remove_one()", 0);

	if (dev) {

		unregister_netdev(dev);
		pci_iounmap(db->pdev, db->ioaddr);
		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
					db->desc_pool_dma_ptr);
		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
				    db->buf_pool_ptr, db->buf_pool_dma_ptr);
		pci_release_regions(pdev);
		free_netdev(dev);	/* free board information */

		pci_set_drvdata(pdev, NULL);
	}

	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}


/*
 *	Open the interface.
 *	The interface is opened whenever "ifconfig" actives it.
 *
 *	Requests the (shared) IRQ, resets the driver's software state,
 *	chooses the CR6 operating mode, initializes the hardware and
 *	starts the one-second housekeeping timer.
 */

static int dmfe_open(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;
	int ret;

	DMFE_DBUG(0, "dmfe_open", 0);

	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability*/
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision:
	 * DM9132 and DM9102A (revision >= 0x30) run in normal mode;
	 * older chips run in "check mode" with store-and-forward. */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x30) ) {
		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Active System Interface */
	netif_wake_queue(dev);

	/* set and active a timer process; first expiry is delayed an
	 * extra 2 s beyond the normal 1 s period */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = dmfe_timer;
	add_timer(&db->timer);

	return 0;
}


/* Initialize DM910X board
 *	Reset DM910X board
 *	Initialize TX/Rx descriptor chain
	structure
 *	Send the set-up frame
 *	Enable Tx/Rx machine
 */

static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
	udelay(100);
	dw32(DCR0, db->cr0_data);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parse SROM and media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	dw32(DCR12, 0x180);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		dw32(DCR12, 0x80);	/* Issue RESET signal */
		mdelay(300);		/* Delay 300 ms */
	}
	dw32(DCR12, 0x0);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode;	/* Force Mode */

	/* Initialize Transmit/Receive descriptor and CR3/4 */
	dmfe_descriptor_init(dev);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame (perfect filter table) */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	dw32(DCR7, db->cr7_data);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	dw32(DCR15, db->cr15_data);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}


/*
 *	Hardware start transmission.
 *	Send a packet to media from the upper layer.
 */

static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
				   struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Too large packet check: the per-descriptor bounce buffer is
	 * TX_BUF_ALLOC (0x600) bytes, so anything over MAX_PACKET_SIZE
	 * is dropped (counted as OK so the stack does not retry). */
	if (skb->len > MAX_PACKET_SIZE) {
		pr_err("big packet = %d\n", (u16)skb->len);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Resource flag check: stop the queue first, re-enable below
	 * once we know descriptors remain */
	netif_stop_queue(dev);

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check, it never happen normally */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
		return NETDEV_TX_BUSY;
	}

	/* Disable NIC interrupt while manipulating the Tx ring */
	dw32(DCR7, 0);

	/* transmit this packet: copy into the descriptor's bounce buffer */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process: hand the descriptor to the chip now
	 * only if nothing is queued and the in-flight limit allows it;
	 * otherwise leave it queued for dmfe_free_tx_pkt() to kick. */
	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
	}

	/* Tx resource check */
	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	dw32(DCR7, db->cr7_data);

	/* free this SKB: data was copied, the skb is no longer needed */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}


/*
 *	Stop the interface.
 *	The interface is stopped when it is brought.
 */

static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* deleted timer */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board, then power down the PHY
	 * (BMCR bit 15 = 0x8000 written to PHY register 0) */
	dw32(DCR0, DM910X_RESET);
	udelay(100);
	phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(db->pdev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counter */
	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
	       db->tx_fifo_underrun, db->tx_excessive_collision,
	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
	       db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
}


/*
 *	DM9102 interrupt handler
 *	receive the packet to upper layer, free the transmitted packet
 */

static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status; writing the value back acks the bits */
	db->cr5_data = dr32(DCR5);
	dw32(DCR5, db->cr5_data);
	if ( !(db->cr5_data & 0xc1) ) {
		/* none of the Rx/Tx/bus-error bits we service are set */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
	dw32(DCR7, 0);

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* system bus error happen */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Received the coming packet */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt<RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode Check: leave check mode (2) for normal mode (4) */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, ioaddr);
	}

	/* Restore CR7 to enable interrupt mask */
	dw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_dmfe (struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;

	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(irq);
	dmfe_interrupt (irq, dev);
	enable_irq(irq);
}
#endif

/*
 *	Free TX resource after TX complete
 *
 *	Walks the Tx ring from tx_remove_ptr, reclaiming descriptors the
 *	chip has released (owner bit clear), updating error statistics,
 *	kicking any queued packet, and waking the queue when space frees up.
 *	Caller holds db->lock (invoked from the interrupt handler).
 */

static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	void __iomem *ioaddr = db->ioaddr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)
			break;	/* chip still owns this descriptor */

		/* A packet sent completed */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistic counter */
		if ( tdes0 != 0x7fffffff ) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					/* switch to store-and-forward to
					 * stop further underruns */
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packet in queue */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Active upper layer, send again */
}


/*
 *	Calculate the CRC value of the Rx packet
 *	flag = 1 : return the reverse CRC (for the received packet CRC)
 *	       0 : return the normal CRC (for Hash Table index)
 */

static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
{
	u32 crc = crc32(~0, Data, Len);
	if (flag) crc = ~crc;
	return crc;
}


/*
 *	Receive the come packet and pass to upper layer
 *
 *	Drains completed descriptors from rx_ready_ptr: frames with
 *	errors or missing First/Last flags recycle their skb; good
 *	frames are handed to the stack (small ones are copied into a
 *	freshly allocated skb so the big Rx buffer can be reused).
 *	Caller holds db->lock (invoked from the interrupt handler).
 */

static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			/* reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;	/* strip FCS */

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is a error packet */
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			/* accept good frames; in promiscuous mode also pass
			 * errored frames longer than 6 bytes up the stack */
			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Received Packet CRC check need or not */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->data, rxlen, 1) !=
					(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
					/* Found a error received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Short packets are copied into a new,
					 * right-sized SKB */
					if ((rxlen < RX_COPY_SIZE) &&
						((newskb = netdev_alloc_skb(dev, rxlen + 2))
						!= NULL)) {

						skb = newskb;
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb_reserve(skb, 2); /* 16byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							  skb_put(skb, rxlen),
							  rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse SKB buffer when the packet is error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}

/*
 * Set DM910X multicast address
 *
 * ndo_set_rx_mode hook: programs promiscuous / all-multicast bits in
 * CR6, or rebuilds the hardware filter table when a bounded multicast
 * list (<= DMFE_MAX_MULTICAST entries) is configured.
 */

static void dmfe_set_filter_mode(struct DEVICE * dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;
	int mc_count = netdev_mc_count(dev);

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	DMFE_DBUG(0, "Set multicast address", mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */
	spin_unlock_irqrestore(&db->lock, flags);
}

/*
 *	Ethtool interface
 */

static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *info)
{
	struct dmfe_board_info *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

/* Only WAKE_PHY (link change) and WAKE_MAGIC are supported; any other
 * requested WOL option is rejected. */
static int dmfe_ethtool_set_wol(struct net_device *dev,
				struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
				WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	db->wol_mode = wolinfo->wolopts;
	return 0;
}

static void dmfe_ethtool_get_wol(struct net_device *dev,
				 struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
	wolinfo->wolopts = db->wol_mode;
}


static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};

/*
 *	A periodic timer routine
 *	Dynamic media sense, allocate Rx buffer...
1139 */ 1140 1141 static void dmfe_timer(unsigned long data) 1142 { 1143 struct net_device *dev = (struct net_device *)data; 1144 struct dmfe_board_info *db = netdev_priv(dev); 1145 void __iomem *ioaddr = db->ioaddr; 1146 u32 tmp_cr8; 1147 unsigned char tmp_cr12; 1148 unsigned long flags; 1149 1150 int link_ok, link_ok_phy; 1151 1152 DMFE_DBUG(0, "dmfe_timer()", 0); 1153 spin_lock_irqsave(&db->lock, flags); 1154 1155 /* Media mode process when Link OK before enter this route */ 1156 if (db->first_in_callback == 0) { 1157 db->first_in_callback = 1; 1158 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { 1159 db->cr6_data &= ~0x40000; 1160 update_cr6(db->cr6_data, ioaddr); 1161 phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); 1162 db->cr6_data |= 0x40000; 1163 update_cr6(db->cr6_data, ioaddr); 1164 db->timer.expires = DMFE_TIMER_WUT + HZ * 2; 1165 add_timer(&db->timer); 1166 spin_unlock_irqrestore(&db->lock, flags); 1167 return; 1168 } 1169 } 1170 1171 1172 /* Operating Mode Check */ 1173 if ( (db->dm910x_chk_mode & 0x1) && 1174 (dev->stats.rx_packets > MAX_CHECK_PACKET) ) 1175 db->dm910x_chk_mode = 0x4; 1176 1177 /* Dynamic reset DM910X : system error or transmit time-out */ 1178 tmp_cr8 = dr32(DCR8); 1179 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { 1180 db->reset_cr8++; 1181 db->wait_reset = 1; 1182 } 1183 db->interval_rx_cnt = 0; 1184 1185 /* TX polling kick monitor */ 1186 if ( db->tx_packet_cnt && 1187 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) { 1188 dw32(DCR1, 0x1); /* Tx polling again */ 1189 1190 /* TX Timeout */ 1191 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) { 1192 db->reset_TXtimeout++; 1193 db->wait_reset = 1; 1194 dev_warn(&dev->dev, "Tx timeout - resetting\n"); 1195 } 1196 } 1197 1198 if (db->wait_reset) { 1199 DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt); 1200 db->reset_count++; 1201 dmfe_dynamic_reset(dev); 1202 db->first_in_callback = 0; 1203 db->timer.expires = DMFE_TIMER_WUT; 1204 
add_timer(&db->timer); 1205 spin_unlock_irqrestore(&db->lock, flags); 1206 return; 1207 } 1208 1209 /* Link status check, Dynamic media type change */ 1210 if (db->chip_id == PCI_DM9132_ID) 1211 tmp_cr12 = dr8(DCR9 + 3); /* DM9132 */ 1212 else 1213 tmp_cr12 = dr8(DCR12); /* DM9102/DM9102A */ 1214 1215 if ( ((db->chip_id == PCI_DM9102_ID) && 1216 (db->chip_revision == 0x30)) || 1217 ((db->chip_id == PCI_DM9132_ID) && 1218 (db->chip_revision == 0x10)) ) { 1219 /* DM9102A Chip */ 1220 if (tmp_cr12 & 2) 1221 link_ok = 0; 1222 else 1223 link_ok = 1; 1224 } 1225 else 1226 /*0x43 is used instead of 0x3 because bit 6 should represent 1227 link status of external PHY */ 1228 link_ok = (tmp_cr12 & 0x43) ? 1 : 0; 1229 1230 1231 /* If chip reports that link is failed it could be because external 1232 PHY link status pin is not connected correctly to chip 1233 To be sure ask PHY too. 1234 */ 1235 1236 /* need a dummy read because of PHY's register latch*/ 1237 phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); 1238 link_ok_phy = (phy_read (db->ioaddr, 1239 db->phy_addr, 1, db->chip_id) & 0x4) ? 
1 : 0; 1240 1241 if (link_ok_phy != link_ok) { 1242 DMFE_DBUG (0, "PHY and chip report different link status", 0); 1243 link_ok = link_ok | link_ok_phy; 1244 } 1245 1246 if ( !link_ok && netif_carrier_ok(dev)) { 1247 /* Link Failed */ 1248 DMFE_DBUG(0, "Link Failed", tmp_cr12); 1249 netif_carrier_off(dev); 1250 1251 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1252 /* AUTO or force 1M Homerun/Longrun don't need */ 1253 if ( !(db->media_mode & 0x38) ) 1254 phy_write(db->ioaddr, db->phy_addr, 1255 0, 0x1000, db->chip_id); 1256 1257 /* AUTO mode, if INT phyxcer link failed, select EXT device */ 1258 if (db->media_mode & DMFE_AUTO) { 1259 /* 10/100M link failed, used 1M Home-Net */ 1260 db->cr6_data|=0x00040000; /* bit18=1, MII */ 1261 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ 1262 update_cr6(db->cr6_data, ioaddr); 1263 } 1264 } else if (!netif_carrier_ok(dev)) { 1265 1266 DMFE_DBUG(0, "Link link OK", tmp_cr12); 1267 1268 /* Auto Sense Speed */ 1269 if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) { 1270 netif_carrier_on(dev); 1271 SHOW_MEDIA_TYPE(db->op_mode); 1272 } 1273 1274 dmfe_process_mode(db); 1275 } 1276 1277 /* HPNA remote command check */ 1278 if (db->HPNA_command & 0xf00) { 1279 db->HPNA_timer--; 1280 if (!db->HPNA_timer) 1281 dmfe_HPNA_remote_cmd_chk(db); 1282 } 1283 1284 /* Timer active again */ 1285 db->timer.expires = DMFE_TIMER_WUT; 1286 add_timer(&db->timer); 1287 spin_unlock_irqrestore(&db->lock, flags); 1288 } 1289 1290 1291 /* 1292 * Dynamic reset the DM910X board 1293 * Stop DM910X board 1294 * Free Tx/Rx allocated memory 1295 * Reset DM910X board 1296 * Re-initialize DM910X board 1297 */ 1298 1299 static void dmfe_dynamic_reset(struct net_device *dev) 1300 { 1301 struct dmfe_board_info *db = netdev_priv(dev); 1302 void __iomem *ioaddr = db->ioaddr; 1303 1304 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0); 1305 1306 /* Sopt MAC controller */ 1307 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ 1308 
update_cr6(db->cr6_data, ioaddr); 1309 dw32(DCR7, 0); /* Disable Interrupt */ 1310 dw32(DCR5, dr32(DCR5)); 1311 1312 /* Disable upper layer interface */ 1313 netif_stop_queue(dev); 1314 1315 /* Free Rx Allocate buffer */ 1316 dmfe_free_rxbuffer(db); 1317 1318 /* system variable init */ 1319 db->tx_packet_cnt = 0; 1320 db->tx_queue_cnt = 0; 1321 db->rx_avail_cnt = 0; 1322 netif_carrier_off(dev); 1323 db->wait_reset = 0; 1324 1325 /* Re-initialize DM910X board */ 1326 dmfe_init_dm910x(dev); 1327 1328 /* Restart upper layer interface */ 1329 netif_wake_queue(dev); 1330 } 1331 1332 1333 /* 1334 * free all allocated rx buffer 1335 */ 1336 1337 static void dmfe_free_rxbuffer(struct dmfe_board_info * db) 1338 { 1339 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0); 1340 1341 /* free allocated rx buffer */ 1342 while (db->rx_avail_cnt) { 1343 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr); 1344 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc; 1345 db->rx_avail_cnt--; 1346 } 1347 } 1348 1349 1350 /* 1351 * Reuse the SK buffer 1352 */ 1353 1354 static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb) 1355 { 1356 struct rx_desc *rxptr = db->rx_insert_ptr; 1357 1358 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { 1359 rxptr->rx_skb_ptr = skb; 1360 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, 1361 skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1362 wmb(); 1363 rxptr->rdes0 = cpu_to_le32(0x80000000); 1364 db->rx_avail_cnt++; 1365 db->rx_insert_ptr = rxptr->next_rx_desc; 1366 } else 1367 DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt); 1368 } 1369 1370 1371 /* 1372 * Initialize transmit/Receive descriptor 1373 * Using Chain structure, and allocate Tx/Rx buffer 1374 */ 1375 1376 static void dmfe_descriptor_init(struct net_device *dev) 1377 { 1378 struct dmfe_board_info *db = netdev_priv(dev); 1379 void __iomem *ioaddr = db->ioaddr; 1380 struct tx_desc *tmp_tx; 1381 struct rx_desc *tmp_rx; 1382 unsigned char *tmp_buf; 1383 dma_addr_t tmp_tx_dma, 
tmp_rx_dma; 1384 dma_addr_t tmp_buf_dma; 1385 int i; 1386 1387 DMFE_DBUG(0, "dmfe_descriptor_init()", 0); 1388 1389 /* tx descriptor start pointer */ 1390 db->tx_insert_ptr = db->first_tx_desc; 1391 db->tx_remove_ptr = db->first_tx_desc; 1392 dw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */ 1393 1394 /* rx descriptor start pointer */ 1395 db->first_rx_desc = (void *)db->first_tx_desc + 1396 sizeof(struct tx_desc) * TX_DESC_CNT; 1397 1398 db->first_rx_desc_dma = db->first_tx_desc_dma + 1399 sizeof(struct tx_desc) * TX_DESC_CNT; 1400 db->rx_insert_ptr = db->first_rx_desc; 1401 db->rx_ready_ptr = db->first_rx_desc; 1402 dw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */ 1403 1404 /* Init Transmit chain */ 1405 tmp_buf = db->buf_pool_start; 1406 tmp_buf_dma = db->buf_pool_dma_start; 1407 tmp_tx_dma = db->first_tx_desc_dma; 1408 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) { 1409 tmp_tx->tx_buf_ptr = tmp_buf; 1410 tmp_tx->tdes0 = cpu_to_le32(0); 1411 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */ 1412 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma); 1413 tmp_tx_dma += sizeof(struct tx_desc); 1414 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma); 1415 tmp_tx->next_tx_desc = tmp_tx + 1; 1416 tmp_buf = tmp_buf + TX_BUF_ALLOC; 1417 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC; 1418 } 1419 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma); 1420 tmp_tx->next_tx_desc = db->first_tx_desc; 1421 1422 /* Init Receive descriptor chain */ 1423 tmp_rx_dma=db->first_rx_desc_dma; 1424 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) { 1425 tmp_rx->rdes0 = cpu_to_le32(0); 1426 tmp_rx->rdes1 = cpu_to_le32(0x01000600); 1427 tmp_rx_dma += sizeof(struct rx_desc); 1428 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma); 1429 tmp_rx->next_rx_desc = tmp_rx + 1; 1430 } 1431 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma); 1432 tmp_rx->next_rx_desc = db->first_rx_desc; 1433 1434 /* pre-allocate Rx buffer */ 1435 allocate_rx_buffer(dev); 1436 
} 1437 1438 1439 /* 1440 * Update CR6 value 1441 * Firstly stop DM910X , then written value and start 1442 */ 1443 1444 static void update_cr6(u32 cr6_data, void __iomem *ioaddr) 1445 { 1446 u32 cr6_tmp; 1447 1448 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */ 1449 dw32(DCR6, cr6_tmp); 1450 udelay(5); 1451 dw32(DCR6, cr6_data); 1452 udelay(5); 1453 } 1454 1455 1456 /* 1457 * Send a setup frame for DM9132 1458 * This setup frame initialize DM910X address filter mode 1459 */ 1460 1461 static void dm9132_id_table(struct net_device *dev) 1462 { 1463 struct dmfe_board_info *db = netdev_priv(dev); 1464 void __iomem *ioaddr = db->ioaddr + 0xc0; 1465 u16 *addrptr = (u16 *)dev->dev_addr; 1466 struct netdev_hw_addr *ha; 1467 u16 i, hash_table[4]; 1468 1469 /* Node address */ 1470 for (i = 0; i < 3; i++) { 1471 dw16(0, addrptr[i]); 1472 ioaddr += 4; 1473 } 1474 1475 /* Clear Hash Table */ 1476 memset(hash_table, 0, sizeof(hash_table)); 1477 1478 /* broadcast address */ 1479 hash_table[3] = 0x8000; 1480 1481 /* the multicast address in Hash Table : 64 bits */ 1482 netdev_for_each_mc_addr(ha, dev) { 1483 u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f; 1484 1485 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1486 } 1487 1488 /* Write the hash table to MAC MD table */ 1489 for (i = 0; i < 4; i++, ioaddr += 4) 1490 dw16(0, hash_table[i]); 1491 } 1492 1493 1494 /* 1495 * Send a setup frame for DM9102/DM9102A 1496 * This setup frame initialize DM910X address filter mode 1497 */ 1498 1499 static void send_filter_frame(struct net_device *dev) 1500 { 1501 struct dmfe_board_info *db = netdev_priv(dev); 1502 struct netdev_hw_addr *ha; 1503 struct tx_desc *txptr; 1504 u16 * addrptr; 1505 u32 * suptr; 1506 int i; 1507 1508 DMFE_DBUG(0, "send_filter_frame()", 0); 1509 1510 txptr = db->tx_insert_ptr; 1511 suptr = (u32 *) txptr->tx_buf_ptr; 1512 1513 /* Node address */ 1514 addrptr = (u16 *) dev->dev_addr; 1515 *suptr++ = addrptr[0]; 1516 *suptr++ = addrptr[1]; 1517 *suptr++ 
= addrptr[2]; 1518 1519 /* broadcast address */ 1520 *suptr++ = 0xffff; 1521 *suptr++ = 0xffff; 1522 *suptr++ = 0xffff; 1523 1524 /* fit the multicast address */ 1525 netdev_for_each_mc_addr(ha, dev) { 1526 addrptr = (u16 *) ha->addr; 1527 *suptr++ = addrptr[0]; 1528 *suptr++ = addrptr[1]; 1529 *suptr++ = addrptr[2]; 1530 } 1531 1532 for (i = netdev_mc_count(dev); i < 14; i++) { 1533 *suptr++ = 0xffff; 1534 *suptr++ = 0xffff; 1535 *suptr++ = 0xffff; 1536 } 1537 1538 /* prepare the setup frame */ 1539 db->tx_insert_ptr = txptr->next_tx_desc; 1540 txptr->tdes1 = cpu_to_le32(0x890000c0); 1541 1542 /* Resource Check and Send the setup packet */ 1543 if (!db->tx_packet_cnt) { 1544 void __iomem *ioaddr = db->ioaddr; 1545 1546 /* Resource Empty */ 1547 db->tx_packet_cnt++; 1548 txptr->tdes0 = cpu_to_le32(0x80000000); 1549 update_cr6(db->cr6_data | 0x2000, ioaddr); 1550 dw32(DCR1, 0x1); /* Issue Tx polling */ 1551 update_cr6(db->cr6_data, ioaddr); 1552 dev->trans_start = jiffies; 1553 } else 1554 db->tx_queue_cnt++; /* Put in TX queue */ 1555 } 1556 1557 1558 /* 1559 * Allocate rx buffer, 1560 * As possible as allocate maxiumn Rx buffer 1561 */ 1562 1563 static void allocate_rx_buffer(struct net_device *dev) 1564 { 1565 struct dmfe_board_info *db = netdev_priv(dev); 1566 struct rx_desc *rxptr; 1567 struct sk_buff *skb; 1568 1569 rxptr = db->rx_insert_ptr; 1570 1571 while(db->rx_avail_cnt < RX_DESC_CNT) { 1572 if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL ) 1573 break; 1574 rxptr->rx_skb_ptr = skb; /* FIXME (?) 
*/ 1575 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, 1576 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1577 wmb(); 1578 rxptr->rdes0 = cpu_to_le32(0x80000000); 1579 rxptr = rxptr->next_rx_desc; 1580 db->rx_avail_cnt++; 1581 } 1582 1583 db->rx_insert_ptr = rxptr; 1584 } 1585 1586 static void srom_clk_write(void __iomem *ioaddr, u32 data) 1587 { 1588 static const u32 cmd[] = { 1589 CR9_SROM_READ | CR9_SRCS, 1590 CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, 1591 CR9_SROM_READ | CR9_SRCS 1592 }; 1593 int i; 1594 1595 for (i = 0; i < ARRAY_SIZE(cmd); i++) { 1596 dw32(DCR9, data | cmd[i]); 1597 udelay(5); 1598 } 1599 } 1600 1601 /* 1602 * Read one word data from the serial ROM 1603 */ 1604 static u16 read_srom_word(void __iomem *ioaddr, int offset) 1605 { 1606 u16 srom_data; 1607 int i; 1608 1609 dw32(DCR9, CR9_SROM_READ); 1610 udelay(5); 1611 dw32(DCR9, CR9_SROM_READ | CR9_SRCS); 1612 udelay(5); 1613 1614 /* Send the Read Command 110b */ 1615 srom_clk_write(ioaddr, SROM_DATA_1); 1616 srom_clk_write(ioaddr, SROM_DATA_1); 1617 srom_clk_write(ioaddr, SROM_DATA_0); 1618 1619 /* Send the offset */ 1620 for (i = 5; i >= 0; i--) { 1621 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; 1622 srom_clk_write(ioaddr, srom_data); 1623 } 1624 1625 dw32(DCR9, CR9_SROM_READ | CR9_SRCS); 1626 udelay(5); 1627 1628 for (i = 16; i > 0; i--) { 1629 dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK); 1630 udelay(5); 1631 srom_data = (srom_data << 1) | 1632 ((dr32(DCR9) & CR9_CRDOUT) ? 
1 : 0); 1633 dw32(DCR9, CR9_SROM_READ | CR9_SRCS); 1634 udelay(5); 1635 } 1636 1637 dw32(DCR9, CR9_SROM_READ); 1638 udelay(5); 1639 return srom_data; 1640 } 1641 1642 1643 /* 1644 * Auto sense the media mode 1645 */ 1646 1647 static u8 dmfe_sense_speed(struct dmfe_board_info *db) 1648 { 1649 void __iomem *ioaddr = db->ioaddr; 1650 u8 ErrFlag = 0; 1651 u16 phy_mode; 1652 1653 /* CR6 bit18=0, select 10/100M */ 1654 update_cr6(db->cr6_data & ~0x40000, ioaddr); 1655 1656 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1657 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1658 1659 if ( (phy_mode & 0x24) == 0x24 ) { 1660 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ 1661 phy_mode = phy_read(db->ioaddr, 1662 db->phy_addr, 7, db->chip_id) & 0xf000; 1663 else /* DM9102/DM9102A */ 1664 phy_mode = phy_read(db->ioaddr, 1665 db->phy_addr, 17, db->chip_id) & 0xf000; 1666 switch (phy_mode) { 1667 case 0x1000: db->op_mode = DMFE_10MHF; break; 1668 case 0x2000: db->op_mode = DMFE_10MFD; break; 1669 case 0x4000: db->op_mode = DMFE_100MHF; break; 1670 case 0x8000: db->op_mode = DMFE_100MFD; break; 1671 default: db->op_mode = DMFE_10MHF; 1672 ErrFlag = 1; 1673 break; 1674 } 1675 } else { 1676 db->op_mode = DMFE_10MHF; 1677 DMFE_DBUG(0, "Link Failed :", phy_mode); 1678 ErrFlag = 1; 1679 } 1680 1681 return ErrFlag; 1682 } 1683 1684 1685 /* 1686 * Set 10/100 phyxcer capability 1687 * AUTO mode : phyxcer register4 is NIC capability 1688 * Force mode: phyxcer register4 is the force media 1689 */ 1690 1691 static void dmfe_set_phyxcer(struct dmfe_board_info *db) 1692 { 1693 void __iomem *ioaddr = db->ioaddr; 1694 u16 phy_reg; 1695 1696 /* Select 10/100M phyxcer */ 1697 db->cr6_data &= ~0x40000; 1698 update_cr6(db->cr6_data, ioaddr); 1699 1700 /* DM9009 Chip: Phyxcer reg18 bit12=0 */ 1701 if (db->chip_id == PCI_DM9009_ID) { 1702 phy_reg = phy_read(db->ioaddr, 1703 db->phy_addr, 18, db->chip_id) & ~0x1000; 1704 1705 phy_write(db->ioaddr, 1706 db->phy_addr, 18, 
phy_reg, db->chip_id); 1707 } 1708 1709 /* Phyxcer capability setting */ 1710 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; 1711 1712 if (db->media_mode & DMFE_AUTO) { 1713 /* AUTO Mode */ 1714 phy_reg |= db->PHY_reg4; 1715 } else { 1716 /* Force Mode */ 1717 switch(db->media_mode) { 1718 case DMFE_10MHF: phy_reg |= 0x20; break; 1719 case DMFE_10MFD: phy_reg |= 0x40; break; 1720 case DMFE_100MHF: phy_reg |= 0x80; break; 1721 case DMFE_100MFD: phy_reg |= 0x100; break; 1722 } 1723 if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61; 1724 } 1725 1726 /* Write new capability to Phyxcer Reg4 */ 1727 if ( !(phy_reg & 0x01e0)) { 1728 phy_reg|=db->PHY_reg4; 1729 db->media_mode|=DMFE_AUTO; 1730 } 1731 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); 1732 1733 /* Restart Auto-Negotiation */ 1734 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) 1735 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id); 1736 if ( !db->chip_type ) 1737 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); 1738 } 1739 1740 1741 /* 1742 * Process op-mode 1743 * AUTO mode : PHY controller in Auto-negotiation Mode 1744 * Force mode: PHY controller in force mode with HUB 1745 * N-way force capability with SWITCH 1746 */ 1747 1748 static void dmfe_process_mode(struct dmfe_board_info *db) 1749 { 1750 u16 phy_reg; 1751 1752 /* Full Duplex Mode Check */ 1753 if (db->op_mode & 0x4) 1754 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */ 1755 else 1756 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */ 1757 1758 /* Transciver Selection */ 1759 if (db->op_mode & 0x10) /* 1M HomePNA */ 1760 db->cr6_data |= 0x40000;/* External MII select */ 1761 else 1762 db->cr6_data &= ~0x40000;/* Internal 10/100 transciver */ 1763 1764 update_cr6(db->cr6_data, db->ioaddr); 1765 1766 /* 10/100M phyxcer force mode need */ 1767 if ( !(db->media_mode & 0x18)) { 1768 /* Forece Mode */ 1769 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); 1770 if ( 
!(phy_reg & 0x1) ) { 1771 /* parter without N-Way capability */ 1772 phy_reg = 0x0; 1773 switch(db->op_mode) { 1774 case DMFE_10MHF: phy_reg = 0x0; break; 1775 case DMFE_10MFD: phy_reg = 0x100; break; 1776 case DMFE_100MHF: phy_reg = 0x2000; break; 1777 case DMFE_100MFD: phy_reg = 0x2100; break; 1778 } 1779 phy_write(db->ioaddr, 1780 db->phy_addr, 0, phy_reg, db->chip_id); 1781 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) 1782 mdelay(20); 1783 phy_write(db->ioaddr, 1784 db->phy_addr, 0, phy_reg, db->chip_id); 1785 } 1786 } 1787 } 1788 1789 1790 /* 1791 * Write a word to Phy register 1792 */ 1793 1794 static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, 1795 u16 phy_data, u32 chip_id) 1796 { 1797 u16 i; 1798 1799 if (chip_id == PCI_DM9132_ID) { 1800 dw16(0x80 + offset * 4, phy_data); 1801 } else { 1802 /* DM9102/DM9102A Chip */ 1803 1804 /* Send 33 synchronization clock to Phy controller */ 1805 for (i = 0; i < 35; i++) 1806 phy_write_1bit(ioaddr, PHY_DATA_1); 1807 1808 /* Send start command(01) to Phy */ 1809 phy_write_1bit(ioaddr, PHY_DATA_0); 1810 phy_write_1bit(ioaddr, PHY_DATA_1); 1811 1812 /* Send write command(01) to Phy */ 1813 phy_write_1bit(ioaddr, PHY_DATA_0); 1814 phy_write_1bit(ioaddr, PHY_DATA_1); 1815 1816 /* Send Phy address */ 1817 for (i = 0x10; i > 0; i = i >> 1) 1818 phy_write_1bit(ioaddr, 1819 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); 1820 1821 /* Send register address */ 1822 for (i = 0x10; i > 0; i = i >> 1) 1823 phy_write_1bit(ioaddr, 1824 offset & i ? PHY_DATA_1 : PHY_DATA_0); 1825 1826 /* written trasnition */ 1827 phy_write_1bit(ioaddr, PHY_DATA_1); 1828 phy_write_1bit(ioaddr, PHY_DATA_0); 1829 1830 /* Write a word data to PHY controller */ 1831 for ( i = 0x8000; i > 0; i >>= 1) 1832 phy_write_1bit(ioaddr, 1833 phy_data & i ? 
PHY_DATA_1 : PHY_DATA_0); 1834 } 1835 } 1836 1837 1838 /* 1839 * Read a word data from phy register 1840 */ 1841 1842 static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) 1843 { 1844 int i; 1845 u16 phy_data; 1846 1847 if (chip_id == PCI_DM9132_ID) { 1848 /* DM9132 Chip */ 1849 phy_data = dr16(0x80 + offset * 4); 1850 } else { 1851 /* DM9102/DM9102A Chip */ 1852 1853 /* Send 33 synchronization clock to Phy controller */ 1854 for (i = 0; i < 35; i++) 1855 phy_write_1bit(ioaddr, PHY_DATA_1); 1856 1857 /* Send start command(01) to Phy */ 1858 phy_write_1bit(ioaddr, PHY_DATA_0); 1859 phy_write_1bit(ioaddr, PHY_DATA_1); 1860 1861 /* Send read command(10) to Phy */ 1862 phy_write_1bit(ioaddr, PHY_DATA_1); 1863 phy_write_1bit(ioaddr, PHY_DATA_0); 1864 1865 /* Send Phy address */ 1866 for (i = 0x10; i > 0; i = i >> 1) 1867 phy_write_1bit(ioaddr, 1868 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); 1869 1870 /* Send register address */ 1871 for (i = 0x10; i > 0; i = i >> 1) 1872 phy_write_1bit(ioaddr, 1873 offset & i ? 
PHY_DATA_1 : PHY_DATA_0); 1874 1875 /* Skip transition state */ 1876 phy_read_1bit(ioaddr); 1877 1878 /* read 16bit data */ 1879 for (phy_data = 0, i = 0; i < 16; i++) { 1880 phy_data <<= 1; 1881 phy_data |= phy_read_1bit(ioaddr); 1882 } 1883 } 1884 1885 return phy_data; 1886 } 1887 1888 1889 /* 1890 * Write one bit data to Phy Controller 1891 */ 1892 1893 static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data) 1894 { 1895 dw32(DCR9, phy_data); /* MII Clock Low */ 1896 udelay(1); 1897 dw32(DCR9, phy_data | MDCLKH); /* MII Clock High */ 1898 udelay(1); 1899 dw32(DCR9, phy_data); /* MII Clock Low */ 1900 udelay(1); 1901 } 1902 1903 1904 /* 1905 * Read one bit phy data from PHY controller 1906 */ 1907 1908 static u16 phy_read_1bit(void __iomem *ioaddr) 1909 { 1910 u16 phy_data; 1911 1912 dw32(DCR9, 0x50000); 1913 udelay(1); 1914 phy_data = (dr32(DCR9) >> 19) & 0x1; 1915 dw32(DCR9, 0x40000); 1916 udelay(1); 1917 1918 return phy_data; 1919 } 1920 1921 1922 /* 1923 * Parser SROM and media mode 1924 */ 1925 1926 static void dmfe_parse_srom(struct dmfe_board_info * db) 1927 { 1928 char * srom = db->srom; 1929 int dmfe_mode, tmp_reg; 1930 1931 DMFE_DBUG(0, "dmfe_parse_srom() ", 0); 1932 1933 /* Init CR15 */ 1934 db->cr15_data = CR15_DEFAULT; 1935 1936 /* Check SROM Version */ 1937 if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) { 1938 /* SROM V4.01 */ 1939 /* Get NIC support media mode */ 1940 db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34)); 1941 db->PHY_reg4 = 0; 1942 for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) { 1943 switch( db->NIC_capability & tmp_reg ) { 1944 case 0x1: db->PHY_reg4 |= 0x0020; break; 1945 case 0x2: db->PHY_reg4 |= 0x0040; break; 1946 case 0x4: db->PHY_reg4 |= 0x0080; break; 1947 case 0x8: db->PHY_reg4 |= 0x0100; break; 1948 } 1949 } 1950 1951 /* Media Mode Force or not check */ 1952 dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) & 1953 le32_to_cpup((__le32 *) (srom + 36))); 1954 switch(dmfe_mode) { 1955 case 0x4: dmfe_media_mode 
= DMFE_100MHF; break; /* 100MHF */ 1956 case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */ 1957 case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */ 1958 case 0x100: 1959 case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */ 1960 } 1961 1962 /* Special Function setting */ 1963 /* VLAN function */ 1964 if ( (SF_mode & 0x1) || (srom[43] & 0x80) ) 1965 db->cr15_data |= 0x40; 1966 1967 /* Flow Control */ 1968 if ( (SF_mode & 0x2) || (srom[40] & 0x1) ) 1969 db->cr15_data |= 0x400; 1970 1971 /* TX pause packet */ 1972 if ( (SF_mode & 0x4) || (srom[40] & 0xe) ) 1973 db->cr15_data |= 0x9800; 1974 } 1975 1976 /* Parse HPNA parameter */ 1977 db->HPNA_command = 1; 1978 1979 /* Accept remote command or not */ 1980 if (HPNA_rx_cmd == 0) 1981 db->HPNA_command |= 0x8000; 1982 1983 /* Issue remote command & operation mode */ 1984 if (HPNA_tx_cmd == 1) 1985 switch(HPNA_mode) { /* Issue Remote Command */ 1986 case 0: db->HPNA_command |= 0x0904; break; 1987 case 1: db->HPNA_command |= 0x0a00; break; 1988 case 2: db->HPNA_command |= 0x0506; break; 1989 case 3: db->HPNA_command |= 0x0602; break; 1990 } 1991 else 1992 switch(HPNA_mode) { /* Don't Issue */ 1993 case 0: db->HPNA_command |= 0x0004; break; 1994 case 1: db->HPNA_command |= 0x0000; break; 1995 case 2: db->HPNA_command |= 0x0006; break; 1996 case 3: db->HPNA_command |= 0x0002; break; 1997 } 1998 1999 /* Check DM9801 or DM9802 present or not */ 2000 db->HPNA_present = 0; 2001 update_cr6(db->cr6_data | 0x40000, db->ioaddr); 2002 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); 2003 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { 2004 /* DM9801 or DM9802 present */ 2005 db->HPNA_timer = 8; 2006 if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) { 2007 /* DM9801 HomeRun */ 2008 db->HPNA_present = 1; 2009 dmfe_program_DM9801(db, tmp_reg); 2010 } else { 2011 /* DM9802 LongRun */ 2012 db->HPNA_present = 2; 2013 dmfe_program_DM9802(db); 2014 } 2015 } 2016 2017 } 2018 2019 2020 /* 2021 * 
Init HomeRun DM9801 2022 */ 2023 2024 static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev) 2025 { 2026 uint reg17, reg25; 2027 2028 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR; 2029 switch(HPNA_rev) { 2030 case 0xb900: /* DM9801 E3 */ 2031 db->HPNA_command |= 0x1000; 2032 reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id); 2033 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000; 2034 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); 2035 break; 2036 case 0xb901: /* DM9801 E4 */ 2037 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); 2038 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor; 2039 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); 2040 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3; 2041 break; 2042 case 0xb902: /* DM9801 E5 */ 2043 case 0xb903: /* DM9801 E6 */ 2044 default: 2045 db->HPNA_command |= 0x1000; 2046 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); 2047 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5; 2048 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); 2049 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor; 2050 break; 2051 } 2052 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); 2053 phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id); 2054 phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id); 2055 } 2056 2057 2058 /* 2059 * Init HomeRun DM9802 2060 */ 2061 2062 static void dmfe_program_DM9802(struct dmfe_board_info * db) 2063 { 2064 uint phy_reg; 2065 2066 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR; 2067 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); 2068 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); 2069 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor; 2070 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id); 2071 } 2072 2073 2074 /* 2075 * Check remote HPNA power and speed status. If not correct, 2076 * issue command again. 
2077 */ 2078 2079 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) 2080 { 2081 uint phy_reg; 2082 2083 /* Got remote device status */ 2084 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60; 2085 switch(phy_reg) { 2086 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */ 2087 case 0x20: phy_reg = 0x0900;break; /* LP/HS */ 2088 case 0x40: phy_reg = 0x0600;break; /* HP/LS */ 2089 case 0x60: phy_reg = 0x0500;break; /* HP/HS */ 2090 } 2091 2092 /* Check remote device status match our setting ot not */ 2093 if ( phy_reg != (db->HPNA_command & 0x0f00) ) { 2094 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, 2095 db->chip_id); 2096 db->HPNA_timer=8; 2097 } else 2098 db->HPNA_timer=600; /* Match, every 10 minutes, check */ 2099 } 2100 2101 2102 2103 static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = { 2104 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID }, 2105 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID }, 2106 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID }, 2107 { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID }, 2108 { 0, } 2109 }; 2110 MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl); 2111 2112 2113 #ifdef CONFIG_PM 2114 static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) 2115 { 2116 struct net_device *dev = pci_get_drvdata(pci_dev); 2117 struct dmfe_board_info *db = netdev_priv(dev); 2118 void __iomem *ioaddr = db->ioaddr; 2119 u32 tmp; 2120 2121 /* Disable upper layer interface */ 2122 netif_device_detach(dev); 2123 2124 /* Disable Tx/Rx */ 2125 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); 2126 update_cr6(db->cr6_data, ioaddr); 2127 2128 /* Disable Interrupt */ 2129 dw32(DCR7, 0); 2130 dw32(DCR5, dr32(DCR5)); 2131 2132 /* Fre RX buffers */ 2133 dmfe_free_rxbuffer(db); 2134 2135 /* Enable WOL */ 2136 pci_read_config_dword(pci_dev, 0x40, &tmp); 2137 tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET); 2138 2139 if (db->wol_mode & WAKE_PHY) 2140 tmp |= DMFE_WOL_LINKCHANGE; 
2141 if (db->wol_mode & WAKE_MAGIC) 2142 tmp |= DMFE_WOL_MAGICPACKET; 2143 2144 pci_write_config_dword(pci_dev, 0x40, tmp); 2145 2146 pci_enable_wake(pci_dev, PCI_D3hot, 1); 2147 pci_enable_wake(pci_dev, PCI_D3cold, 1); 2148 2149 /* Power down device*/ 2150 pci_save_state(pci_dev); 2151 pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state)); 2152 2153 return 0; 2154 } 2155 2156 static int dmfe_resume(struct pci_dev *pci_dev) 2157 { 2158 struct net_device *dev = pci_get_drvdata(pci_dev); 2159 u32 tmp; 2160 2161 pci_set_power_state(pci_dev, PCI_D0); 2162 pci_restore_state(pci_dev); 2163 2164 /* Re-initialize DM910X board */ 2165 dmfe_init_dm910x(dev); 2166 2167 /* Disable WOL */ 2168 pci_read_config_dword(pci_dev, 0x40, &tmp); 2169 2170 tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET); 2171 pci_write_config_dword(pci_dev, 0x40, tmp); 2172 2173 pci_enable_wake(pci_dev, PCI_D3hot, 0); 2174 pci_enable_wake(pci_dev, PCI_D3cold, 0); 2175 2176 /* Restart upper layer interface */ 2177 netif_device_attach(dev); 2178 2179 return 0; 2180 } 2181 #else 2182 #define dmfe_suspend NULL 2183 #define dmfe_resume NULL 2184 #endif 2185 2186 static struct pci_driver dmfe_driver = { 2187 .name = "dmfe", 2188 .id_table = dmfe_pci_tbl, 2189 .probe = dmfe_init_one, 2190 .remove = __devexit_p(dmfe_remove_one), 2191 .suspend = dmfe_suspend, 2192 .resume = dmfe_resume 2193 }; 2194 2195 MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw"); 2196 MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver"); 2197 MODULE_LICENSE("GPL"); 2198 MODULE_VERSION(DRV_VERSION); 2199 2200 module_param(debug, int, 0); 2201 module_param(mode, byte, 0); 2202 module_param(cr6set, int, 0); 2203 module_param(chkmode, byte, 0); 2204 module_param(HPNA_mode, byte, 0); 2205 module_param(HPNA_rx_cmd, byte, 0); 2206 module_param(HPNA_tx_cmd, byte, 0); 2207 module_param(HPNA_NoiseFloor, byte, 0); 2208 module_param(SF_mode, byte, 0); 2209 MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)"); 2210 
MODULE_PARM_DESC(mode, "Davicom DM9xxx: " 2211 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); 2212 2213 MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function " 2214 "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)"); 2215 2216 /* Description: 2217 * when user used insmod to add module, system invoked init_module() 2218 * to initialize and register. 2219 */ 2220 2221 static int __init dmfe_init_module(void) 2222 { 2223 int rc; 2224 2225 pr_info("%s\n", version); 2226 printed_version = 1; 2227 2228 DMFE_DBUG(0, "init_module() ", debug); 2229 2230 if (debug) 2231 dmfe_debug = debug; /* set debug flag */ 2232 if (cr6set) 2233 dmfe_cr6_user_set = cr6set; 2234 2235 switch(mode) { 2236 case DMFE_10MHF: 2237 case DMFE_100MHF: 2238 case DMFE_10MFD: 2239 case DMFE_100MFD: 2240 case DMFE_1M_HPNA: 2241 dmfe_media_mode = mode; 2242 break; 2243 default:dmfe_media_mode = DMFE_AUTO; 2244 break; 2245 } 2246 2247 if (HPNA_mode > 4) 2248 HPNA_mode = 0; /* Default: LP/HS */ 2249 if (HPNA_rx_cmd > 1) 2250 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */ 2251 if (HPNA_tx_cmd > 1) 2252 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */ 2253 if (HPNA_NoiseFloor > 15) 2254 HPNA_NoiseFloor = 0; 2255 2256 rc = pci_register_driver(&dmfe_driver); 2257 if (rc < 0) 2258 return rc; 2259 2260 return 0; 2261 } 2262 2263 2264 /* 2265 * Description: 2266 * when user used rmmod to delete module, system invoked clean_module() 2267 * to un-register all registered services. 2268 */ 2269 2270 static void __exit dmfe_cleanup_module(void) 2271 { 2272 DMFE_DBUG(0, "dmfe_clean_module() ", debug); 2273 pci_unregister_driver(&dmfe_driver); 2274 } 2275 2276 module_init(dmfe_init_module); 2277 module_exit(dmfe_cleanup_module); 2278