/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
/*
 *	Copyright 1996-1999 Thomas Bogendoerfer
 *
 *	Derived from the lance driver written 1993,1994,1995 by Donald Becker.
 *
 *	Copyright 1993 United States Government as represented by the
 *	Director, National Security Agency.
 *
 *	This software may be used and distributed according to the terms
 *	of the GNU General Public License, incorporated herein by reference.
 *
 *	This driver is for PCnet32 and PCnetPCI based ethercards
 */
/**************************************************************************
 *  23 Oct, 2000.
 *  Fixed a few bugs, related to running the controller in 32bit mode.
 *
 *  Carsten Langgaard, carstenl@mips.com
 *  Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 *
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"pcnet32"
#define DRV_VERSION	"1.35"
#define DRV_RELDATE	"21.Apr.2008"
#define PFX		DRV_NAME ": "

static const char *const version =
    DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <asm/dma.h>
#include <asm/irq.h>

/*
 * PCI device identifiers for "new style" Linux PCI Device Drivers
 */
static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },

	/*
	 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
	 * the incorrect vendor id.
	 */
	{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
	  .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },

	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);

static int cards_found;

/*
 * VLB I/O addresses
 */
static unsigned int pcnet32_portlist[] =
    { 0x300, 0x320, 0x340, 0x360, 0 };

static int pcnet32_debug;
static int tx_start = 1;	/* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
static int pcnet32vlb;		/* check for VLB cards? */

static struct net_device *pcnet32_dev;

static int max_interrupt_work = 2;
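/*
 * Received frames shorter than rx_copybreak bytes are copied into a
 * freshly allocated skb in pcnet32_rx_entry(); longer frames keep their
 * skb and a new ring buffer is mapped in its place.
 */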
static int rx_copybreak = 200;

#define PCNET32_PORT_AUI	0x00
#define PCNET32_PORT_10BT	0x01
#define PCNET32_PORT_GPSI	0x02
#define PCNET32_PORT_MII	0x03

#define PCNET32_PORT_PORTSEL	0x03
#define PCNET32_PORT_ASEL	0x04
#define PCNET32_PORT_100	0x40
#define PCNET32_PORT_FD		0x80

#define PCNET32_DMA_MASK	0xffffffff

#define PCNET32_WATCHDOG_TIMEOUT	(jiffies + (2 * HZ))
#define PCNET32_BLINK_TIMEOUT	(jiffies + (HZ/4))

/*
 * table to translate option values from tulip
 * to internal options
 */
static const unsigned char options_mapping[] = {
	PCNET32_PORT_ASEL,			/*  0 Auto-select      */
	PCNET32_PORT_AUI,			/*  1 BNC/AUI          */
	PCNET32_PORT_AUI,			/*  2 AUI/BNC          */
	PCNET32_PORT_ASEL,			/*  3 not supported    */
	PCNET32_PORT_10BT | PCNET32_PORT_FD,	/*  4 10baseT-FD       */
	PCNET32_PORT_ASEL,			/*  5 not supported    */
	PCNET32_PORT_ASEL,			/*  6 not supported    */
	PCNET32_PORT_ASEL,			/*  7 not supported    */
	PCNET32_PORT_ASEL,			/*  8 not supported    */
	PCNET32_PORT_MII,			/*  9 MII 10baseT      */
	PCNET32_PORT_MII | PCNET32_PORT_FD,	/* 10 MII 10baseT-FD   */
	PCNET32_PORT_MII,			/* 11 MII (autosel)    */
	PCNET32_PORT_10BT,			/* 12 10BaseT          */
	PCNET32_PORT_MII | PCNET32_PORT_100,	/* 13 MII 100BaseTx    */
						/* 14 MII 100BaseTx-FD */
	PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
	PCNET32_PORT_ASEL			/* 15 not supported    */
};

static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
	"Loopback test (offline)"
};

#define PCNET32_TEST_LEN	ARRAY_SIZE(pcnet32_gstrings_test)

#define PCNET32_NUM_REGS	136

#define MAX_UNITS		8	/* More are supported, limit only on options */
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];
static int homepna[MAX_UNITS];

/*
 * Theory of Operation
 *
 * This driver uses the same software structure as the normal lance
 * driver.  So look for a verbose description in lance.c.  The difference
 * from the normal lance driver is the use of the 32bit mode of the
 * PCnet32 and PCnetPCI chips.  Because these are 32bit chips, there is
 * no 16MB limitation and we do not need bounce buffers.
 */

/*
 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
 * For example, 4 Tx buffers and 16 Rx buffers would be expressed as
 * 2 (4 == 2^2) and 4 (16 == 2^4); the defaults below give 16 Tx and
 * 32 Rx buffers.
 */
#ifndef PCNET32_LOG_TX_BUFFERS
#define PCNET32_LOG_TX_BUFFERS		4
#define PCNET32_LOG_RX_BUFFERS		5
#define PCNET32_LOG_MAX_TX_BUFFERS	9	/* 2^9 == 512 */
#define PCNET32_LOG_MAX_RX_BUFFERS	9
#endif

#define TX_RING_SIZE		(1 << (PCNET32_LOG_TX_BUFFERS))
#define TX_MAX_RING_SIZE	(1 << (PCNET32_LOG_MAX_TX_BUFFERS))

#define RX_RING_SIZE		(1 << (PCNET32_LOG_RX_BUFFERS))
#define RX_MAX_RING_SIZE	(1 << (PCNET32_LOG_MAX_RX_BUFFERS))

#define PKT_BUF_SKB		1544
/* actual buffer length after being aligned */
#define PKT_BUF_SIZE		(PKT_BUF_SKB - NET_IP_ALIGN)
/* chip wants twos complement of the (aligned) buffer length */
#define NEG_BUF_SIZE		(NET_IP_ALIGN - PKT_BUF_SKB)
/* Offsets from base I/O address. */
#define PCNET32_WIO_RDP		0x10
#define PCNET32_WIO_RAP		0x12
#define PCNET32_WIO_RESET	0x14
#define PCNET32_WIO_BDP		0x16

#define PCNET32_DWIO_RDP	0x10
#define PCNET32_DWIO_RAP	0x14
#define PCNET32_DWIO_RESET	0x18
#define PCNET32_DWIO_BDP	0x1C

#define PCNET32_TOTAL_SIZE	0x20

#define CSR0		0
#define CSR0_INIT	0x1
#define CSR0_START	0x2
#define CSR0_STOP	0x4
#define CSR0_TXPOLL	0x8
#define CSR0_INTEN	0x40
#define CSR0_IDON	0x0100
#define CSR0_NORMAL	(CSR0_START | CSR0_INTEN)
#define PCNET32_INIT_LOW	1
#define PCNET32_INIT_HIGH	2
#define CSR3		3
#define CSR4		4
#define CSR5		5
#define CSR5_SUSPEND	0x0001
#define CSR15		15
#define PCNET32_MC_FILTER	8

#define PCNET32_79C970A	0x2621

/* The PCNET32 Rx and Tx ring descriptors. */
struct pcnet32_rx_head {
	__le32	base;
	__le16	buf_length;	/* two's complement of length */
	__le16	status;
	__le32	msg_length;
	__le32	reserved;
};

struct pcnet32_tx_head {
	__le32	base;
	__le16	length;		/* two's complement of length */
	__le16	status;
	__le32	misc;
	__le32	reserved;
};

/* The PCNET32 32-Bit initialization block, described in databook. */
struct pcnet32_init_block {
	__le16	mode;
	__le16	tlen_rlen;
	u8	phys_addr[6];
	__le16	reserved;
	__le32	filter[2];
	/* Receive and transmit ring base, along with extra bits. */
	__le32	rx_ring;
	__le32	tx_ring;
};

/* PCnet32 access functions */
struct pcnet32_access {
	u16	(*read_csr) (unsigned long, int);
	void	(*write_csr) (unsigned long, int, u16);
	u16	(*read_bcr) (unsigned long, int);
	void	(*write_bcr) (unsigned long, int, u16);
	u16	(*read_rap) (unsigned long);
	void	(*write_rap) (unsigned long, u16);
	void	(*reset) (unsigned long);
};

/*
 * The first field of pcnet32_private is read by the ethernet device
 * so the structure should be allocated using pci_alloc_consistent().
 */
struct pcnet32_private {
	struct pcnet32_init_block *init_block;
	/* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
	struct pcnet32_rx_head	*rx_ring;
	struct pcnet32_tx_head	*tx_ring;
	dma_addr_t		init_dma_addr;	/* DMA address of beginning of the init block,
						   returned by pci_alloc_consistent */
	struct pci_dev		*pci_dev;
	const char		*name;
	/* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
	struct sk_buff		**tx_skbuff;
	struct sk_buff		**rx_skbuff;
	dma_addr_t		*tx_dma_addr;
	dma_addr_t		*rx_dma_addr;
	const struct pcnet32_access *a;
	spinlock_t		lock;		/* Guard lock */
	unsigned int		cur_rx, cur_tx;	/* The next free ring entry */
	unsigned int		rx_ring_size;	/* current rx ring size */
	unsigned int		tx_ring_size;	/* current tx ring size */
	unsigned int		rx_mod_mask;	/* rx ring modular mask */
	unsigned int		tx_mod_mask;	/* tx ring modular mask */
	unsigned short		rx_len_bits;
	unsigned short		tx_len_bits;
	dma_addr_t		rx_ring_dma_addr;
	dma_addr_t		tx_ring_dma_addr;
	unsigned int		dirty_rx,	/* ring entries to be freed. */
				dirty_tx;

	struct net_device	*dev;
	struct napi_struct	napi;
	char			tx_full;
	char			phycount;	/* number of phys found */
	int			options;
	unsigned int		shared_irq:1,	/* shared irq possible */
				dxsuflo:1,	/* disable transmit stop on uflo */
				mii:1;		/* mii port available */
	struct net_device	*next;
	struct mii_if_info	mii_if;
	struct timer_list	watchdog_timer;
	u32			msg_enable;	/* debug message level */

	/* each bit indicates an available PHY */
	u32			phymask;
	unsigned short		chip_version;	/* which variant this is */

	/* saved registers during ethtool blink */
	u16			save_regs[4];
};

static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
static int pcnet32_open(struct net_device *);
static int pcnet32_init_ring(struct net_device *);
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
				      struct net_device *);
static void pcnet32_tx_timeout(struct net_device *dev);
static irqreturn_t pcnet32_interrupt(int, void *);
static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
static void pcnet32_load_multicast(struct net_device *dev);
static void pcnet32_set_multicast_list(struct net_device *);
static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
static void pcnet32_watchdog(struct net_device *);
static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
		       int val);
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
static void pcnet32_ethtool_test(struct net_device *dev,
				 struct ethtool_test *eth_test, u64 *data);
static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1);
static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
static void pcnet32_free_ring(struct net_device *dev);
static void pcnet32_check_media(struct net_device *dev, int verbose);

static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
{
	outw(index, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_RDP);
}

static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
{
	outw(index, addr + PCNET32_WIO_RAP);
	outw(val, addr + PCNET32_WIO_RDP);
}

static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
{
	outw(index, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_BDP);
}

static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
{
	outw(index, addr + PCNET32_WIO_RAP);
	outw(val, addr + PCNET32_WIO_BDP);
}

static u16 pcnet32_wio_read_rap(unsigned long addr)
{
	return inw(addr + PCNET32_WIO_RAP);
}

static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
{
	outw(val, addr + PCNET32_WIO_RAP);
}

static void pcnet32_wio_reset(unsigned long addr)
{
	inw(addr + PCNET32_WIO_RESET);
}

static int pcnet32_wio_check(unsigned long addr)
{
	outw(88, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_RAP) == 88;
}
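/*
 * pcnet32_wio_check() and pcnet32_dwio_check() probe an access method
 * by writing the register index 88 to RAP and reading it back; a chip
 * that responds in the matching I/O mode echoes the index.
 */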
static const struct pcnet32_access pcnet32_wio = {
	.read_csr = pcnet32_wio_read_csr,
	.write_csr = pcnet32_wio_write_csr,
	.read_bcr = pcnet32_wio_read_bcr,
	.write_bcr = pcnet32_wio_write_bcr,
	.read_rap = pcnet32_wio_read_rap,
	.write_rap = pcnet32_wio_write_rap,
	.reset = pcnet32_wio_reset
};

static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
}

static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	outl(val, addr + PCNET32_DWIO_RDP);
}

static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
}

static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	outl(val, addr + PCNET32_DWIO_BDP);
}

static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
	return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
}

static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
{
	outl(val, addr + PCNET32_DWIO_RAP);
}

static void pcnet32_dwio_reset(unsigned long addr)
{
	inl(addr + PCNET32_DWIO_RESET);
}

static int pcnet32_dwio_check(unsigned long addr)
{
	outl(88, addr + PCNET32_DWIO_RAP);
	return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
}

static const struct pcnet32_access pcnet32_dwio = {
	.read_csr = pcnet32_dwio_read_csr,
	.write_csr = pcnet32_dwio_write_csr,
	.read_bcr = pcnet32_dwio_read_bcr,
	.write_bcr = pcnet32_dwio_write_bcr,
	.read_rap = pcnet32_dwio_read_rap,
	.write_rap = pcnet32_dwio_write_rap,
	.reset = pcnet32_dwio_reset
};

static void pcnet32_netif_stop(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&lp->napi);
	netif_tx_disable(dev);
}

static void pcnet32_netif_start(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	ulong ioaddr = dev->base_addr;
	u16 val;

	netif_wake_queue(dev);
	val = lp->a->read_csr(ioaddr, CSR3);
	val &= 0x00ff;
	lp->a->write_csr(ioaddr, CSR3, val);
	napi_enable(&lp->napi);
}
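/*
 * Note: the two ring re-allocation helpers below run with lp->lock
 * held, which is why their list allocations use GFP_ATOMIC.
 */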
/*
 * Allocate space for the new sized tx ring.
 * Free old resources
 * Save new resources.
 * Any failure keeps old resources.
 * Must be called with lp->lock held.
 */
static void pcnet32_realloc_tx_ring(struct net_device *dev,
				    struct pcnet32_private *lp,
				    unsigned int size)
{
	dma_addr_t new_ring_dma_addr;
	dma_addr_t *new_dma_addr_list;
	struct pcnet32_tx_head *new_tx_ring;
	struct sk_buff **new_skb_list;

	pcnet32_purge_tx_ring(dev);

	new_tx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_tx_head) *
					   (1 << size),
					   &new_ring_dma_addr);
	if (new_tx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return;
	}
	memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));

	new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
				    GFP_ATOMIC);
	if (!new_dma_addr_list)
		goto free_new_tx_ring;

	new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
			       GFP_ATOMIC);
	if (!new_skb_list)
		goto free_new_lists;

	kfree(lp->tx_skbuff);
	kfree(lp->tx_dma_addr);
	pci_free_consistent(lp->pci_dev,
			    sizeof(struct pcnet32_tx_head) *
			    lp->tx_ring_size, lp->tx_ring,
			    lp->tx_ring_dma_addr);

	lp->tx_ring_size = (1 << size);
	lp->tx_mod_mask = lp->tx_ring_size - 1;
	lp->tx_len_bits = (size << 12);
	lp->tx_ring = new_tx_ring;
	lp->tx_ring_dma_addr = new_ring_dma_addr;
	lp->tx_dma_addr = new_dma_addr_list;
	lp->tx_skbuff = new_skb_list;
	return;

free_new_lists:
	kfree(new_dma_addr_list);
free_new_tx_ring:
	pci_free_consistent(lp->pci_dev,
			    sizeof(struct pcnet32_tx_head) *
			    (1 << size),
			    new_tx_ring,
			    new_ring_dma_addr);
}
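/*
 * The (size << 12) and (size << 4) values stored in tx_len_bits and
 * rx_len_bits above and below are the log2 ring sizes in the positions
 * the init block's tlen_rlen field expects (TLEN in bits 15:12, RLEN in
 * bits 7:4); see pcnet32_probe1() where tlen_rlen is assembled.
 */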
/*
 * Allocate space for the new sized rx ring.
 * Re-use old receive buffers.
 *   alloc extra buffers
 *   and free unneeded buffers
 * Save new resources.
 * Any failure keeps old resources.
 * Must be called with lp->lock held.
 */
static void pcnet32_realloc_rx_ring(struct net_device *dev,
				    struct pcnet32_private *lp,
				    unsigned int size)
{
	dma_addr_t new_ring_dma_addr;
	dma_addr_t *new_dma_addr_list;
	struct pcnet32_rx_head *new_rx_ring;
	struct sk_buff **new_skb_list;
	int new, overlap;
	unsigned int entries = 1 << size;

	new_rx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_rx_head) *
					   entries,
					   &new_ring_dma_addr);
	if (new_rx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return;
	}
	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);

	new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
	if (!new_dma_addr_list)
		goto free_new_rx_ring;

	new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
	if (!new_skb_list)
		goto free_new_lists;

	/* first copy the current receive buffers */
	overlap = min(entries, lp->rx_ring_size);
	for (new = 0; new < overlap; new++) {
		new_rx_ring[new] = lp->rx_ring[new];
		new_dma_addr_list[new] = lp->rx_dma_addr[new];
		new_skb_list[new] = lp->rx_skbuff[new];
	}
	/* now allocate any new buffers needed */
	for (; new < entries; new++) {
		struct sk_buff *rx_skbuff;
		new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
		rx_skbuff = new_skb_list[new];
		if (!rx_skbuff) {
			/* keep the original lists and buffers */
			netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
				  __func__);
			goto free_all_new;
		}
		skb_reserve(rx_skbuff, NET_IP_ALIGN);

		new_dma_addr_list[new] =
			pci_map_single(lp->pci_dev, rx_skbuff->data,
				       PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(lp->pci_dev,
					  new_dma_addr_list[new])) {
			netif_err(lp, drv, dev, "%s dma mapping failed\n",
				  __func__);
			dev_kfree_skb(new_skb_list[new]);
			goto free_all_new;
		}
		new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
		new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
		new_rx_ring[new].status = cpu_to_le16(0x8000);
	}
	/* and free any unneeded buffers */
	for (; new < lp->rx_ring_size; new++) {
		if (lp->rx_skbuff[new]) {
			if (!pci_dma_mapping_error(lp->pci_dev,
						   lp->rx_dma_addr[new]))
				pci_unmap_single(lp->pci_dev,
						 lp->rx_dma_addr[new],
						 PKT_BUF_SIZE,
						 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(lp->rx_skbuff[new]);
		}
	}

	kfree(lp->rx_skbuff);
	kfree(lp->rx_dma_addr);
	pci_free_consistent(lp->pci_dev,
			    sizeof(struct pcnet32_rx_head) *
			    lp->rx_ring_size, lp->rx_ring,
			    lp->rx_ring_dma_addr);

	lp->rx_ring_size = entries;
	lp->rx_mod_mask = lp->rx_ring_size - 1;
	lp->rx_len_bits = (size << 4);
	lp->rx_ring = new_rx_ring;
	lp->rx_ring_dma_addr = new_ring_dma_addr;
	lp->rx_dma_addr = new_dma_addr_list;
	lp->rx_skbuff = new_skb_list;
	return;

free_all_new:
	while (--new >= lp->rx_ring_size) {
		if (new_skb_list[new]) {
			if (!pci_dma_mapping_error(lp->pci_dev,
						   new_dma_addr_list[new]))
				pci_unmap_single(lp->pci_dev,
						 new_dma_addr_list[new],
						 PKT_BUF_SIZE,
						 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(new_skb_list[new]);
		}
	}
	kfree(new_skb_list);
free_new_lists:
	kfree(new_dma_addr_list);
free_new_rx_ring:
	pci_free_consistent(lp->pci_dev,
			    sizeof(struct pcnet32_rx_head) * entries,
			    new_rx_ring,
			    new_ring_dma_addr);
}
static void pcnet32_purge_rx_ring(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int i;

	/* free all allocated skbuffs */
	for (i = 0; i < lp->rx_ring_size; i++) {
		lp->rx_ring[i].status = 0;	/* CPU owns buffer */
		wmb();		/* Make sure adapter sees owner change */
		if (lp->rx_skbuff[i]) {
			if (!pci_dma_mapping_error(lp->pci_dev,
						   lp->rx_dma_addr[i]))
				pci_unmap_single(lp->pci_dev,
						 lp->rx_dma_addr[i],
						 PKT_BUF_SIZE,
						 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(lp->rx_skbuff[i]);
		}
		lp->rx_skbuff[i] = NULL;
		lp->rx_dma_addr[i] = 0;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void pcnet32_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	pcnet32_interrupt(0, dev);
	enable_irq(dev->irq);
}
#endif

static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int r = -EOPNOTSUPP;

	if (lp->mii) {
		spin_lock_irqsave(&lp->lock, flags);
		mii_ethtool_gset(&lp->mii_if, cmd);
		spin_unlock_irqrestore(&lp->lock, flags);
		r = 0;
	}
	return r;
}

static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int r = -EOPNOTSUPP;

	if (lp->mii) {
		spin_lock_irqsave(&lp->lock, flags);
		r = mii_ethtool_sset(&lp->mii_if, cmd);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return r;
}

static void pcnet32_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	if (lp->pci_dev)
		strlcpy(info->bus_info, pci_name(lp->pci_dev),
			sizeof(info->bus_info));
	else
		snprintf(info->bus_info, sizeof(info->bus_info),
			 "VLB 0x%lx", dev->base_addr);
}

static u32 pcnet32_get_link(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&lp->lock, flags);
	if (lp->mii) {
		r = mii_link_ok(&lp->mii_if);
	} else if (lp->chip_version >= PCNET32_79C970A) {
		ulong ioaddr = dev->base_addr;	/* card base I/O address */
		r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
	} else {	/* cannot detect link on really old chips */
		r = 1;
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	return r;
}

static u32 pcnet32_get_msglevel(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	return lp->msg_enable;
}

static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	lp->msg_enable = value;
}

static int pcnet32_nway_reset(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int r = -EOPNOTSUPP;

	if (lp->mii) {
		spin_lock_irqsave(&lp->lock, flags);
		r = mii_nway_restart(&lp->mii_if);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return r;
}

static void pcnet32_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	ering->tx_max_pending = TX_MAX_RING_SIZE;
	ering->tx_pending = lp->tx_ring_size;
	ering->rx_max_pending = RX_MAX_RING_SIZE;
	ering->rx_pending = lp->rx_ring_size;
}
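/*
 * pcnet32_set_ringparam() rounds each requested count up to the next
 * power of two (minimum 4, maximum TX_MAX_RING_SIZE/RX_MAX_RING_SIZE),
 * since the chip only supports power-of-two ring sizes; a request for
 * 100 entries, for example, yields a ring of 128.
 */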
static int pcnet32_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ering)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	unsigned int size;
	ulong ioaddr = dev->base_addr;
	int i;

	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(dev))
		pcnet32_netif_stop(dev);

	spin_lock_irqsave(&lp->lock, flags);
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */

	size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);

	/* set the minimum ring size to 4, to allow the loopback test to work
	 * unchanged.
	 */
	for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
		if (size <= (1 << i))
			break;
	}
	if ((1 << i) != lp->tx_ring_size)
		pcnet32_realloc_tx_ring(dev, lp, i);

	size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
	for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
		if (size <= (1 << i))
			break;
	}
	if ((1 << i) != lp->rx_ring_size)
		pcnet32_realloc_rx_ring(dev, lp, i);

	lp->napi.weight = lp->rx_ring_size / 2;

	if (netif_running(dev)) {
		pcnet32_netif_start(dev);
		pcnet32_restart(dev, CSR0_NORMAL);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
		   lp->rx_ring_size, lp->tx_ring_size);

	return 0;
}

static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
				u8 *data)
{
	memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
}

static int pcnet32_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return PCNET32_TEST_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void pcnet32_ethtool_test(struct net_device *dev,
				 struct ethtool_test *test, u64 *data)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int rc;

	if (test->flags == ETH_TEST_FL_OFFLINE) {
		rc = pcnet32_loopback_test(dev, data);
		if (rc) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Loopback test failed\n");
			test->flags |= ETH_TEST_FL_FAILED;
		} else
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Loopback test passed\n");
	} else
		netif_printk(lp, hw, KERN_DEBUG, dev,
			     "No tests to run (specify 'Offline' on ethtool)\n");
}				/* end pcnet32_ethtool_test */

static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;	/* access to registers */
	ulong ioaddr = dev->base_addr;	/* card base I/O address */
	struct sk_buff *skb;	/* sk buff */
	int x, i;		/* counters */
	int numbuffs = 4;	/* number of TX/RX buffers and descs */
	u16 status = 0x8300;	/* TX ring status */
	__le16 teststatus;	/* test of ring status */
	int rc;			/* return code */
	int size;		/* size of packets */
	unsigned char *packet;	/* source packet data */
	static const int data_len = 60;	/* length of source packets */
	unsigned long flags;
	unsigned long ticks;

	rc = 1;			/* default to fail */

	if (netif_running(dev))
		pcnet32_netif_stop(dev);

	spin_lock_irqsave(&lp->lock, flags);
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */

	numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
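	/*
	 * Test sequence: build numbuffs frames addressed to ourselves,
	 * enable internal loopback (BCR32 bit 1 plus the loopback bits in
	 * CSR15), start the chip, then poll the receive descriptors until
	 * the chip hands them back and compare the payloads.
	 */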
	/* Reset the PCNET32 */
	lp->a->reset(ioaddr);
	lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */

	/* switch pcnet32 to 32bit mode */
	lp->a->write_bcr(ioaddr, 20, 2);

	/* purge & init rings but don't actually restart */
	pcnet32_restart(dev, 0x0000);

	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */

	/* Initialize Transmit buffers. */
	size = data_len + 15;
	for (x = 0; x < numbuffs; x++) {
		skb = netdev_alloc_skb(dev, size);
		if (!skb) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Cannot allocate skb at line: %d!\n",
				     __LINE__);
			goto clean_up;
		}
		packet = skb->data;
		skb_put(skb, size);	/* create space for data */
		lp->tx_skbuff[x] = skb;
		lp->tx_ring[x].length = cpu_to_le16(-skb->len);
		lp->tx_ring[x].misc = 0;

		/* put DA and SA into the skb */
		for (i = 0; i < 6; i++)
			*packet++ = dev->dev_addr[i];
		for (i = 0; i < 6; i++)
			*packet++ = dev->dev_addr[i];
		/* type */
		*packet++ = 0x08;
		*packet++ = 0x06;
		/* packet number */
		*packet++ = x;
		/* fill packet with data */
		for (i = 0; i < data_len; i++)
			*packet++ = i;

		lp->tx_dma_addr[x] =
			pci_map_single(lp->pci_dev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "DMA mapping error at line: %d!\n",
				     __LINE__);
			goto clean_up;
		}
		lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
		wmb();	/* Make sure owner changes after all others are visible */
		lp->tx_ring[x].status = cpu_to_le16(status);
	}

	x = a->read_bcr(ioaddr, 32);	/* set internal loopback in BCR32 */
	a->write_bcr(ioaddr, 32, x | 0x0002);

	/* set int loopback in CSR15 */
	x = a->read_csr(ioaddr, CSR15) & 0xfffc;
	lp->a->write_csr(ioaddr, CSR15, x | 0x0044);

	teststatus = cpu_to_le16(0x8000);
	lp->a->write_csr(ioaddr, CSR0, CSR0_START);	/* Set STRT bit */

	/* Check status of descriptors */
	for (x = 0; x < numbuffs; x++) {
		ticks = 0;
		rmb();
		while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
			spin_unlock_irqrestore(&lp->lock, flags);
			msleep(1);
			spin_lock_irqsave(&lp->lock, flags);
			rmb();
			ticks++;
		}
		if (ticks == 200) {
			netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
			break;
		}
	}

	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */
	wmb();
	if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
		netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");

		for (x = 0; x < numbuffs; x++) {
			netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
			skb = lp->rx_skbuff[x];
			for (i = 0; i < size; i++)
				pr_cont(" %02x", *(skb->data + i));
			pr_cont("\n");
		}
	}

	x = 0;
	rc = 0;
	while (x < numbuffs && !rc) {
		skb = lp->rx_skbuff[x];
		packet = lp->tx_skbuff[x]->data;
		for (i = 0; i < size; i++) {
			if (*(skb->data + i) != packet[i]) {
				netif_printk(lp, hw, KERN_DEBUG, dev,
					     "Error in compare! %2x - %02x %02x\n",
					     i, *(skb->data + i), packet[i]);
				rc = 1;
				break;
			}
		}
		x++;
	}

clean_up:
	*data1 = rc;
	pcnet32_purge_tx_ring(dev);

	x = a->read_csr(ioaddr, CSR15);
	a->write_csr(ioaddr, CSR15, (x & ~0x0044));	/* reset bits 6 and 2 */

	x = a->read_bcr(ioaddr, 32);	/* reset internal loopback */
	a->write_bcr(ioaddr, 32, (x & ~0x0002));

	if (netif_running(dev)) {
		pcnet32_netif_start(dev);
		pcnet32_restart(dev, CSR0_NORMAL);
	} else {
		pcnet32_purge_rx_ring(dev);
		lp->a->write_bcr(ioaddr, 20, 4);	/* return to 16bit mode */
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	return rc;
}				/* end pcnet32_loopback_test */

static int pcnet32_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	unsigned long flags;
	int i;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current value of the bcrs */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
		spin_unlock_irqrestore(&lp->lock, flags);
		return 2;	/* cycle on/off twice per second */

	case ETHTOOL_ID_ON:
	case ETHTOOL_ID_OFF:
		/* Blink the led */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
		spin_unlock_irqrestore(&lp->lock, flags);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the original value of the bcrs */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return 0;
}

/*
 * lp->lock must be held.
 */
static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
			   int can_sleep)
{
	int csr5;
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	int ticks;

	/* really old chips have to be stopped. */
	if (lp->chip_version < PCNET32_79C970A)
		return 0;

	/* set SUSPEND (SPND) - CSR5 bit 0 */
	csr5 = a->read_csr(ioaddr, CSR5);
	a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);

	/* poll waiting for bit to be set */
	ticks = 0;
	while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
		spin_unlock_irqrestore(&lp->lock, *flags);
		if (can_sleep)
			msleep(1);
		else
			mdelay(1);
		spin_lock_irqsave(&lp->lock, *flags);
		ticks++;
		if (ticks > 200) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Error getting into suspend!\n");
			return 0;
		}
	}
	return 1;
}

/*
 * process one receive descriptor entry
 */

static void pcnet32_rx_entry(struct net_device *dev,
			     struct pcnet32_private *lp,
			     struct pcnet32_rx_head *rxp,
			     int entry)
{
	int status = (short)le16_to_cpu(rxp->status) >> 8;
	int rx_in_place = 0;
	struct sk_buff *skb;
	short pkt_len;
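	/*
	 * status is the high byte of the descriptor's RMD1 word: STP|ENP
	 * (0x03) means a complete frame in a single buffer.  The error
	 * bits tested below are FRAM (0x20), OFLO (0x10), CRC (0x08) and
	 * BUFF (0x04); ENP (0x01) gates the general rx_errors count.
	 */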
	if (status != 0x03) {	/* There was an error. */
		/*
		 * There is a tricky error noted by John Murphy,
		 * <murf@perftech.com> to Russ Nelson: Even with full-sized
		 * buffers it's possible for a jabber packet to use two
		 * buffers, with only the last correctly noting the error.
		 */
		if (status & 0x01)	/* Only count a general error at the */
			dev->stats.rx_errors++;	/* end of a packet. */
		if (status & 0x20)
			dev->stats.rx_frame_errors++;
		if (status & 0x10)
			dev->stats.rx_over_errors++;
		if (status & 0x08)
			dev->stats.rx_crc_errors++;
		if (status & 0x04)
			dev->stats.rx_fifo_errors++;
		return;
	}

	pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;

	/* Discard oversize frames. */
	if (unlikely(pkt_len > PKT_BUF_SIZE)) {
		netif_err(lp, drv, dev, "Impossible packet size %d!\n",
			  pkt_len);
		dev->stats.rx_errors++;
		return;
	}
	if (pkt_len < 60) {
		netif_err(lp, rx_err, dev, "Runt packet!\n");
		dev->stats.rx_errors++;
		return;
	}

	if (pkt_len > rx_copybreak) {
		struct sk_buff *newskb;
		dma_addr_t new_dma_addr;

		newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
		/*
		 * map the new buffer, if mapping fails, drop the packet and
		 * reuse the old buffer
		 */
		if (newskb) {
			skb_reserve(newskb, NET_IP_ALIGN);
			new_dma_addr = pci_map_single(lp->pci_dev,
						      newskb->data,
						      PKT_BUF_SIZE,
						      PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
				netif_err(lp, rx_err, dev,
					  "DMA mapping error.\n");
				dev_kfree_skb(newskb);
				skb = NULL;
			} else {
				skb = lp->rx_skbuff[entry];
				pci_unmap_single(lp->pci_dev,
						 lp->rx_dma_addr[entry],
						 PKT_BUF_SIZE,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
				lp->rx_skbuff[entry] = newskb;
				lp->rx_dma_addr[entry] = new_dma_addr;
				rxp->base = cpu_to_le32(new_dma_addr);
				rx_in_place = 1;
			}
		} else
			skb = NULL;
	} else
		skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

	if (skb == NULL) {
		dev->stats.rx_dropped++;
		return;
	}
	if (!rx_in_place) {
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, pkt_len);	/* Make room */
		pci_dma_sync_single_for_cpu(lp->pci_dev,
					    lp->rx_dma_addr[entry],
					    pkt_len,
					    PCI_DMA_FROMDEVICE);
		skb_copy_to_linear_data(skb,
					(unsigned char *)(lp->rx_skbuff[entry]->data),
					pkt_len);
		pci_dma_sync_single_for_device(lp->pci_dev,
					       lp->rx_dma_addr[entry],
					       pkt_len,
					       PCI_DMA_FROMDEVICE);
	}
	dev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
	dev->stats.rx_packets++;
}

static int pcnet32_rx(struct net_device *dev, int budget)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int entry = lp->cur_rx & lp->rx_mod_mask;
	struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
	int npackets = 0;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
		pcnet32_rx_entry(dev, lp, rxp, entry);
		npackets += 1;
		/*
		 * The docs say that the buffer length isn't touched, but Andrew
		 * Boyd of QNX reports that some revs of the 79C965 clear it.
		 */
		rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
		wmb();	/* Make sure owner changes after others are visible */
		rxp->status = cpu_to_le16(0x8000);
		entry = (++lp->cur_rx) & lp->rx_mod_mask;
		rxp = &lp->rx_ring[entry];
	}

	return npackets;
}

static int pcnet32_tx(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned int dirty_tx = lp->dirty_tx;
	int delta;
	int must_restart = 0;

	while (dirty_tx != lp->cur_tx) {
		int entry = dirty_tx & lp->tx_mod_mask;
		int status = (short)le16_to_cpu(lp->tx_ring[entry].status);

		if (status < 0)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[entry].base = 0;

		if (status & 0x4000) {
			/* There was a major error, log it. */
			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
			dev->stats.tx_errors++;
			netif_err(lp, tx_err, dev,
				  "Tx error status=%04x err_status=%08x\n",
				  status, err_status);
			if (err_status & 0x04000000)
				dev->stats.tx_aborted_errors++;
			if (err_status & 0x08000000)
				dev->stats.tx_carrier_errors++;
			if (err_status & 0x10000000)
				dev->stats.tx_window_errors++;
#ifndef DO_DXSUFLO
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				/* Ackk!  On FIFO errors the Tx unit is turned off! */
				/* Remove this verbosity later! */
				netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
				must_restart = 1;
			}
#else
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
					/* Ackk!  On FIFO errors the Tx unit is turned off! */
					/* Remove this verbosity later! */
					netif_err(lp, tx_err, dev,
						  "Tx FIFO error!\n");
					must_restart = 1;
				}
			}
#endif
		} else {
			if (status & 0x1800)
				dev->stats.collisions++;
			dev->stats.tx_packets++;
		}

		/* We must free the original skb */
		if (lp->tx_skbuff[entry]) {
			pci_unmap_single(lp->pci_dev,
					 lp->tx_dma_addr[entry],
					 lp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(lp->tx_skbuff[entry]);
			lp->tx_skbuff[entry] = NULL;
			lp->tx_dma_addr[entry] = 0;
		}
		dirty_tx++;
	}

	delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
	if (delta > lp->tx_ring_size) {
		netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
			  dirty_tx, lp->cur_tx, lp->tx_full);
		dirty_tx += lp->tx_ring_size;
		delta -= lp->tx_ring_size;
	}

	if (lp->tx_full &&
	    netif_queue_stopped(dev) &&
	    delta < lp->tx_ring_size - 2) {
		/* The ring is no longer full, clear tbusy. */
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}
	lp->dirty_tx = dirty_tx;

	return must_restart;
}
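/*
 * NAPI poll: receive up to budget packets, reap completed transmits
 * (restarting the chip if pcnet32_tx() saw a fatal FIFO error), and
 * re-enable interrupts once less than a full budget of work was done.
 */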
static int pcnet32_poll(struct napi_struct *napi, int budget)
{
	struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
	struct net_device *dev = lp->dev;
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;
	int work_done;
	u16 val;

	work_done = pcnet32_rx(dev, budget);

	spin_lock_irqsave(&lp->lock, flags);
	if (pcnet32_tx(dev)) {
		/* reset the chip to clear the error condition, then restart */
		lp->a->reset(ioaddr);
		lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */
		pcnet32_restart(dev, CSR0_START);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	if (work_done < budget) {
		spin_lock_irqsave(&lp->lock, flags);

		__napi_complete(napi);

		/* clear interrupt masks */
		val = lp->a->read_csr(ioaddr, CSR3);
		val &= 0x00ff;
		lp->a->write_csr(ioaddr, CSR3, val);

		/* Set interrupt enable. */
		lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);

		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return work_done;
}

#define PCNET32_REGS_PER_PHY	32
#define PCNET32_MAX_PHYS	32
static int pcnet32_get_regs_len(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int j = lp->phycount * PCNET32_REGS_PER_PHY;

	return (PCNET32_NUM_REGS + j) * sizeof(u16);
}

static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *ptr)
{
	int i, csr0;
	u16 *buff = ptr;
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	csr0 = a->read_csr(ioaddr, CSR0);
	if (!(csr0 & CSR0_STOP))	/* If not stopped */
		pcnet32_suspend(dev, &flags, 1);

	/* read address PROM */
	for (i = 0; i < 16; i += 2)
		*buff++ = inw(ioaddr + i);

	/* read control and status registers */
	for (i = 0; i < 90; i++)
		*buff++ = a->read_csr(ioaddr, i);

	*buff++ = a->read_csr(ioaddr, 112);
	*buff++ = a->read_csr(ioaddr, 114);

	/* read bus configuration registers */
	for (i = 0; i < 30; i++)
		*buff++ = a->read_bcr(ioaddr, i);

	*buff++ = 0;	/* skip bcr30 so as not to hang 79C976 */

	for (i = 31; i < 36; i++)
		*buff++ = a->read_bcr(ioaddr, i);

	/* read mii phy registers */
	if (lp->mii) {
		int j;
		for (j = 0; j < PCNET32_MAX_PHYS; j++) {
			if (lp->phymask & (1 << j)) {
				for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
					lp->a->write_bcr(ioaddr, 33,
							 (j << 5) | i);
					*buff++ = lp->a->read_bcr(ioaddr, 34);
				}
			}
		}
	}

	if (!(csr0 & CSR0_STOP)) {	/* If not stopped */
		int csr5;

		/* clear SUSPEND (SPND) - CSR5 bit 0 */
		csr5 = a->read_csr(ioaddr, CSR5);
		a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}

static const struct ethtool_ops pcnet32_ethtool_ops = {
	.get_settings = pcnet32_get_settings,
	.set_settings = pcnet32_set_settings,
	.get_drvinfo = pcnet32_get_drvinfo,
	.get_msglevel = pcnet32_get_msglevel,
	.set_msglevel = pcnet32_set_msglevel,
	.nway_reset = pcnet32_nway_reset,
	.get_link = pcnet32_get_link,
	.get_ringparam = pcnet32_get_ringparam,
	.set_ringparam = pcnet32_set_ringparam,
	.get_strings = pcnet32_get_strings,
	.self_test = pcnet32_ethtool_test,
	.set_phys_id = pcnet32_set_phys_id,
	.get_regs_len = pcnet32_get_regs_len,
	.get_regs = pcnet32_get_regs,
	.get_sset_count = pcnet32_get_sset_count,
};

/* only probes for non-PCI devices, the rest are handled by
 * pci_register_driver via pcnet32_probe_pci */

static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
{
	unsigned int *port, ioaddr;

	/* search for PCnet32 VLB cards at known addresses */
	for (port = pcnet32_portlist; (ioaddr = *port); port++) {
		if (request_region
		    (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
			/* check if there is really a pcnet chip on that ioaddr */
			if ((inb(ioaddr + 14) == 0x57) &&
			    (inb(ioaddr + 15) == 0x57)) {
				pcnet32_probe1(ioaddr, 0, NULL);
			} else {
				release_region(ioaddr, PCNET32_TOTAL_SIZE);
			}
		}
	}
}

static int
pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long ioaddr;
	int err;

	err = pci_enable_device(pdev);
	if (err < 0) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("failed to enable device -- err=%d\n", err);
		return err;
	}
	pci_set_master(pdev);

	ioaddr = pci_resource_start(pdev, 0);
	if (!ioaddr) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("card has no PCI IO resources, aborting\n");
		return -ENODEV;
	}

	if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
		return -ENODEV;
	}
	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("io address range already allocated\n");
		return -EBUSY;
	}

	err = pcnet32_probe1(ioaddr, 1, pdev);
	if (err < 0)
		pci_disable_device(pdev);

	return err;
}

static const struct net_device_ops pcnet32_netdev_ops = {
	.ndo_open = pcnet32_open,
	.ndo_stop = pcnet32_close,
	.ndo_start_xmit = pcnet32_start_xmit,
	.ndo_tx_timeout = pcnet32_tx_timeout,
	.ndo_get_stats = pcnet32_get_stats,
	.ndo_set_rx_mode = pcnet32_set_multicast_list,
	.ndo_do_ioctl = pcnet32_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pcnet32_poll_controller,
#endif
};

/* pcnet32_probe1
 *  Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
 *  pdev will be NULL when called from pcnet32_probe_vlbus.
 */
static int
pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
	struct pcnet32_private *lp;
	int i, media;
	int fdx, mii, fset, dxsuflo;
	int chip_version;
	char *chipname;
	struct net_device *dev;
	const struct pcnet32_access *a = NULL;
	u8 promaddr[ETH_ALEN];
	int ret = -ENODEV;

	/* reset the chip */
	pcnet32_wio_reset(ioaddr);

	/* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
	if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
		a = &pcnet32_wio;
	} else {
		pcnet32_dwio_reset(ioaddr);
		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
		    pcnet32_dwio_check(ioaddr)) {
			a = &pcnet32_dwio;
		} else {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_err("No access methods\n");
			goto err_release_region;
		}
	}

	chip_version =
	    a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
	if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
		pr_info(" PCnet chip version is %#x\n", chip_version);
	if ((chip_version & 0xfff) != 0x003) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("Unsupported chip version\n");
		goto err_release_region;
	}

	/* initialize variables */
	fdx = mii = fset = dxsuflo = 0;
	chip_version = (chip_version >> 12) & 0xffff;

	switch (chip_version) {
	case 0x2420:
		chipname = "PCnet/PCI 79C970";	/* PCI */
		break;
	case 0x2430:
		if (shared)
			chipname = "PCnet/PCI 79C970";	/* 970 gives the wrong chip id back */
		else
			chipname = "PCnet/32 79C965";	/* 486/VL bus */
		break;
	case 0x2621:
		chipname = "PCnet/PCI II 79C970A";	/* PCI */
		fdx = 1;
		break;
	case 0x2623:
		chipname = "PCnet/FAST 79C971";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2624:
		chipname = "PCnet/FAST+ 79C972";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2625:
		chipname = "PCnet/FAST III 79C973";	/* PCI */
		fdx = 1;
		mii = 1;
		break;
	case 0x2626:
		chipname = "PCnet/Home 79C978";	/* PCI */
		fdx = 1;
		/*
		 * This is based on specs published at www.amd.com.  This section
		 * assumes that a card with a 79C978 wants to go into standard
		 * ethernet mode.  The 79C978 can also go into 1Mb HomePNA mode,
		 * and the module option homepna=1 can select this instead.
		 */
		media = a->read_bcr(ioaddr, 49);
		media &= ~3;	/* default to 10Mb ethernet */
		if (cards_found < MAX_UNITS && homepna[cards_found])
			media |= 1;	/* switch to home wiring mode */
		if (pcnet32_debug & NETIF_MSG_PROBE)
			printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
			       (media & 1) ? "1" : "10");
		a->write_bcr(ioaddr, 49, media);
		break;
	case 0x2627:
		chipname = "PCnet/FAST III 79C975";	/* PCI */
		fdx = 1;
		mii = 1;
		break;
	case 0x2628:
		chipname = "PCnet/PRO 79C976";
		fdx = 1;
		mii = 1;
		break;
	default:
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("PCnet version %#x, no PCnet32 chip\n",
				chip_version);
		goto err_release_region;
	}

	/*
	 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
	 * starting until the packet is loaded. Strike one for reliability, lose
	 * one for latency - although on PCI this isn't a big loss. Older chips
	 * have FIFOs smaller than a packet, so you can't do this.
	 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
	 */
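	/*
	 * 0x0860 below is BurstWrEn | BurstRdEn | NOUFLO (BCR18 bits 5, 6
	 * and 11), and the CSR80 write forces the transmit start point to
	 * its maximum setting of roughly 220 bytes.
	 */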
	if (fset) {
		a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
		a->write_csr(ioaddr, 80,
			     (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
		dxsuflo = 1;
	}

	dev = alloc_etherdev(sizeof(*lp));
	if (!dev) {
		ret = -ENOMEM;
		goto err_release_region;
	}

	if (pdev)
		SET_NETDEV_DEV(dev, &pdev->dev);

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s at %#3lx,", chipname, ioaddr);

	/* In most chips, after a chip reset, the ethernet address is read from the
	 * station address PROM at the base address and programmed into the
	 * "Physical Address Registers" CSR12-14.
	 * As a precautionary measure, we read the PROM values and complain if
	 * they disagree with the CSRs.  If they miscompare, and the PROM addr
	 * is valid, then the PROM addr is used.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int val;
		val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
		/* There may be endianness issues here. */
		dev->dev_addr[2 * i] = val & 0x0ff;
		dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
	}

	/* read PROM address and compare with CSR address */
	for (i = 0; i < ETH_ALEN; i++)
		promaddr[i] = inb(ioaddr + i);

	if (!ether_addr_equal(promaddr, dev->dev_addr) ||
	    !is_valid_ether_addr(dev->dev_addr)) {
		if (is_valid_ether_addr(promaddr)) {
			if (pcnet32_debug & NETIF_MSG_PROBE) {
				pr_cont(" warning: CSR address invalid,\n");
				pr_info(" using instead PROM address of");
			}
			memcpy(dev->dev_addr, promaddr, ETH_ALEN);
		}
	}

	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
	if (!is_valid_ether_addr(dev->dev_addr))
		memset(dev->dev_addr, 0, ETH_ALEN);

	if (pcnet32_debug & NETIF_MSG_PROBE) {
		pr_cont(" %pM", dev->dev_addr);

		/* Version 0x2623 and 0x2624 */
		if (((chip_version + 1) & 0xfffe) == 0x2624) {
			i = a->read_csr(ioaddr, 80) & 0x0C00;	/* Check tx_start_pt */
			pr_info(" tx_start_pt(0x%04x):", i);
			switch (i >> 10) {
			case 0:
				pr_cont(" 20 bytes,");
				break;
			case 1:
				pr_cont(" 64 bytes,");
				break;
			case 2:
				pr_cont(" 128 bytes,");
				break;
			case 3:
				pr_cont("~220 bytes,");
				break;
			}
			i = a->read_bcr(ioaddr, 18);	/* Check Burst/Bus control */
			pr_cont(" BCR18(%x):", i & 0xffff);
			if (i & (1 << 5))
				pr_cont("BurstWrEn ");
			if (i & (1 << 6))
				pr_cont("BurstRdEn ");
			if (i & (1 << 7))
				pr_cont("DWordIO ");
			if (i & (1 << 11))
				pr_cont("NoUFlow ");
			i = a->read_bcr(ioaddr, 25);
			pr_info(" SRAMSIZE=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 26);
			pr_cont(" SRAM_BND=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 27);
			if (i & (1 << 14))
				pr_cont("LowLatRx");
		}
	}

	dev->base_addr = ioaddr;
	lp = netdev_priv(dev);
	/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
	lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
					      &lp->init_dma_addr);
	if (!lp->init_block) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("Consistent memory allocation failed\n");
		ret = -ENOMEM;
		goto err_free_netdev;
	}
	lp->pci_dev = pdev;

	lp->dev = dev;

	spin_lock_init(&lp->lock);
	lp->name = chipname;
	lp->shared_irq = shared;
	lp->tx_ring_size = TX_RING_SIZE;	/* default tx ring size */
	lp->rx_ring_size = RX_RING_SIZE;	/* default rx ring size */
	lp->tx_mod_mask = lp->tx_ring_size - 1;
	lp->rx_mod_mask = lp->rx_ring_size - 1;
	lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
	lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
	lp->mii_if.full_duplex = fdx;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;
	lp->dxsuflo = dxsuflo;
	lp->mii = mii;
	lp->chip_version = chip_version;
	lp->msg_enable = pcnet32_debug;
	if ((cards_found >= MAX_UNITS) ||
	    (options[cards_found] >= sizeof(options_mapping)))
		lp->options = PCNET32_PORT_ASEL;
	else
		lp->options = options_mapping[options[cards_found]];
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;

	/* napi.weight is used in both the napi and non-napi cases */
	lp->napi.weight = lp->rx_ring_size / 2;

	netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);

	if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
	    ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
		lp->options |= PCNET32_PORT_FD;

	lp->a = a;

	/* prior to register_netdev, dev->name is not yet correct */
	if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
		ret = -ENOMEM;
		goto err_free_ring;
	}
	/* detect special T1/E1 WAN card by checking for MAC address */
	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
	    dev->dev_addr[2] == 0x75)
		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
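	/*
	 * Fill the init block: mode 0x0003 disables RX and TX for now,
	 * tlen_rlen carries the log2 ring sizes, and the chip learns the
	 * block's bus address through CSR1 (low 16 bits) and CSR2 (high
	 * 16 bits) below.
	 */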
	if (pdev) {
		/* use the IRQ provided by PCI */
		dev->irq = pdev->irq;
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(" assigned IRQ %d\n", dev->irq);
	} else {
		unsigned long irq_mask = probe_irq_on();

		/*
		 * To auto-IRQ we enable the initialization-done and DMA error
		 * interrupts.  For ISA boards we get a DMA error, but VLB and
		 * PCI boards will work.
		 */
		/* Trigger an initialization just for the interrupt. */
		a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
		mdelay(1);

		dev->irq = probe_irq_off(irq_mask);
		if (!dev->irq) {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_cont(", failed to detect IRQ line\n");
			ret = -ENODEV;
			goto err_free_ring;
		}
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(", probed IRQ %d\n", dev->irq);
	}

	/* Set the mii phy_id so that we can query the link state */
	if (lp->mii) {
		/* lp->phycount and lp->phymask are set to 0 by memset above */

		lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
		/* scan for PHYs */
		for (i = 0; i < PCNET32_MAX_PHYS; i++) {
			unsigned short id1, id2;

			id1 = mdio_read(dev, i, MII_PHYSID1);
			if (id1 == 0xffff)
				continue;
			id2 = mdio_read(dev, i, MII_PHYSID2);
			if (id2 == 0xffff)
				continue;
			if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
				continue;	/* 79C971 & 79C972 have phantom phy at id 31 */
			lp->phycount++;
			lp->phymask |= (1 << i);
			lp->mii_if.phy_id = i;
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_info("Found PHY %04x:%04x at address %d\n",
					id1, id2, i);
		}
		lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
		if (lp->phycount > 1)
			lp->options |= PCNET32_PORT_MII;
	}

	init_timer(&lp->watchdog_timer);
	lp->watchdog_timer.data = (unsigned long)dev;
	lp->watchdog_timer.function = (void *)&pcnet32_watchdog;

	/* The PCNET32-specific entries in the device structure. */
	dev->netdev_ops = &pcnet32_netdev_ops;
	dev->ethtool_ops = &pcnet32_ethtool_ops;
	dev->watchdog_timeo = (5 * HZ);

	/* Fill in the generic fields of the device structure. */
	if (register_netdev(dev))
		goto err_free_ring;

	if (pdev) {
		pci_set_drvdata(pdev, dev);
	} else {
		lp->next = pcnet32_dev;
		pcnet32_dev = dev;
	}

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s: registered as %s\n", dev->name, lp->name);
	cards_found++;

	/* enable LED writes */
	a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);

	return 0;

err_free_ring:
	pcnet32_free_ring(dev);
	pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
			    lp->init_block, lp->init_dma_addr);
err_free_netdev:
	free_netdev(dev);
err_release_region:
	release_region(ioaddr, PCNET32_TOTAL_SIZE);
	return ret;
}

/* if any allocation fails, caller must also call pcnet32_free_ring */
static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_tx_head) *
					   lp->tx_ring_size,
					   &lp->tx_ring_dma_addr);
	if (lp->tx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return -ENOMEM;
	}

	lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_rx_head) *
					   lp->rx_ring_size,
					   &lp->rx_ring_dma_addr);
	if (lp->rx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return -ENOMEM;
	}

	lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
				  GFP_ATOMIC);
	if (!lp->tx_dma_addr)
		return -ENOMEM;

	lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
				  GFP_ATOMIC);
	if (!lp->rx_dma_addr)
		return -ENOMEM;
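	/*
	 * One sk_buff pointer is kept per descriptor so the matching skb
	 * can be unmapped (via the DMA handles above) and freed when the
	 * chip hands the descriptor back to the CPU.
	 */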
	lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
				GFP_ATOMIC);
	if (!lp->tx_skbuff)
		return -ENOMEM;

	lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
				GFP_ATOMIC);
	if (!lp->rx_skbuff)
		return -ENOMEM;

	return 0;
}

static void pcnet32_free_ring(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	kfree(lp->tx_skbuff);
	lp->tx_skbuff = NULL;

	kfree(lp->rx_skbuff);
	lp->rx_skbuff = NULL;

	kfree(lp->tx_dma_addr);
	lp->tx_dma_addr = NULL;

	kfree(lp->rx_dma_addr);
	lp->rx_dma_addr = NULL;

	if (lp->tx_ring) {
		pci_free_consistent(lp->pci_dev,
				    sizeof(struct pcnet32_tx_head) *
				    lp->tx_ring_size, lp->tx_ring,
				    lp->tx_ring_dma_addr);
		lp->tx_ring = NULL;
	}

	if (lp->rx_ring) {
		pci_free_consistent(lp->pci_dev,
				    sizeof(struct pcnet32_rx_head) *
				    lp->rx_ring_size, lp->rx_ring,
				    lp->rx_ring_dma_addr);
		lp->rx_ring = NULL;
	}
}

static int pcnet32_open(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	struct pci_dev *pdev = lp->pci_dev;
	unsigned long ioaddr = dev->base_addr;
	u16 val;
	int i;
	int rc;
	unsigned long flags;

	if (request_irq(dev->irq, pcnet32_interrupt,
			lp->shared_irq ? IRQF_SHARED : 0, dev->name,
			(void *)dev)) {
		return -EAGAIN;
	}

	spin_lock_irqsave(&lp->lock, flags);
	/* Check for a valid station address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		rc = -EINVAL;
		goto err_free_irq;
	}

	/* Reset the PCNET32 */
	lp->a->reset(ioaddr);

	/* switch pcnet32 to 32bit mode */
	lp->a->write_bcr(ioaddr, 20, 2);

	netif_printk(lp, ifup, KERN_DEBUG, dev,
		     "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
		     __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
		     (u32) (lp->rx_ring_dma_addr),
		     (u32) (lp->init_dma_addr));

	/* set/reset autoselect bit */
	val = lp->a->read_bcr(ioaddr, 2) & ~2;
	if (lp->options & PCNET32_PORT_ASEL)
		val |= 2;
	lp->a->write_bcr(ioaddr, 2, val);

	/* handle full duplex setting */
	if (lp->mii_if.full_duplex) {
		val = lp->a->read_bcr(ioaddr, 9) & ~3;
		if (lp->options & PCNET32_PORT_FD) {
			val |= 1;
			if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
				val |= 2;
		} else if (lp->options & PCNET32_PORT_ASEL) {
			/* workaround for xSeries 250; turn on for 79C975 only */
			if (lp->chip_version == 0x2627)
				val |= 3;
		}
		lp->a->write_bcr(ioaddr, 9, val);
	}

	/* set/reset GPSI bit in test register */
	val = lp->a->read_csr(ioaddr, 124) & ~0x10;
	if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
		val |= 0x10;
	lp->a->write_csr(ioaddr, 124, val);

	/* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
	if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
	    (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
	     pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
		if (lp->options & PCNET32_PORT_ASEL) {
			lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
			netif_printk(lp, link, KERN_DEBUG, dev,
				     "Setting 100Mb-Full Duplex\n");
		}
	}
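	/*
	 * PHY setup: with at most one PHY the port is programmed directly
	 * through BCR32; with multiple PHYs the automatic port selection is
	 * turned off and each PHY is configured over MII instead.
	 */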
	if (lp->phycount < 2) {
		/*
		 * 24 Jun 2004, according to AMD: in order to change the PHY,
		 * DANAS (or DISPM for 79C976) must be set; then select the
		 * speed, duplex, and/or enable auto negotiation, and clear
		 * DANAS.
		 */
		if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
			lp->a->write_bcr(ioaddr, 32,
					 lp->a->read_bcr(ioaddr, 32) | 0x0080);
			/* disable Auto Negotiation, set 10Mbps, HD */
			val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
			if (lp->options & PCNET32_PORT_FD)
				val |= 0x10;
			if (lp->options & PCNET32_PORT_100)
				val |= 0x08;
			lp->a->write_bcr(ioaddr, 32, val);
		} else {
			if (lp->options & PCNET32_PORT_ASEL) {
				lp->a->write_bcr(ioaddr, 32,
						 lp->a->read_bcr(ioaddr,
								 32) | 0x0080);
				/* enable auto negotiate, setup, disable fd */
				val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
				val |= 0x20;
				lp->a->write_bcr(ioaddr, 32, val);
			}
		}
	} else {
		int first_phy = -1;
		u16 bmcr;
		u32 bcr9;
		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

		/*
		 * There is really no other good way to handle multiple PHYs
		 * than turning off all of the automatics.
		 */
		val = lp->a->read_bcr(ioaddr, 2);
		lp->a->write_bcr(ioaddr, 2, val & ~2);
		val = lp->a->read_bcr(ioaddr, 32);
		lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7));	/* stop MII manager */

		if (!(lp->options & PCNET32_PORT_ASEL)) {
			/* setup ecmd */
			ecmd.port = PORT_MII;
			ecmd.transceiver = XCVR_INTERNAL;
			ecmd.autoneg = AUTONEG_DISABLE;
			ethtool_cmd_speed_set(&ecmd,
					      (lp->options & PCNET32_PORT_100) ?
					      SPEED_100 : SPEED_10);
			bcr9 = lp->a->read_bcr(ioaddr, 9);

			if (lp->options & PCNET32_PORT_FD) {
				ecmd.duplex = DUPLEX_FULL;
				bcr9 |= (1 << 0);
			} else {
				ecmd.duplex = DUPLEX_HALF;
				bcr9 &= ~(1 << 0);	/* clear FDEN for half duplex */
			}
			lp->a->write_bcr(ioaddr, 9, bcr9);
		}

		for (i = 0; i < PCNET32_MAX_PHYS; i++) {
			if (lp->phymask & (1 << i)) {
				/* isolate all but the first PHY */
				bmcr = mdio_read(dev, i, MII_BMCR);
				if (first_phy == -1) {
					first_phy = i;
					mdio_write(dev, i, MII_BMCR,
						   bmcr & ~BMCR_ISOLATE);
				} else {
					mdio_write(dev, i, MII_BMCR,
						   bmcr | BMCR_ISOLATE);
				}
				/* use mii_ethtool_sset to setup PHY */
				lp->mii_if.phy_id = i;
				ecmd.phy_address = i;
				if (lp->options & PCNET32_PORT_ASEL) {
					mii_ethtool_gset(&lp->mii_if, &ecmd);
					ecmd.autoneg = AUTONEG_ENABLE;
				}
				mii_ethtool_sset(&lp->mii_if, &ecmd);
			}
		}
		lp->mii_if.phy_id = first_phy;
		netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
	}

#ifdef DO_DXSUFLO
	if (lp->dxsuflo) {	/* Disable transmit stop on underflow */
		val = lp->a->read_csr(ioaddr, CSR3);
		val |= 0x40;
		lp->a->write_csr(ioaddr, CSR3, val);
	}
#endif

	lp->init_block->mode =
	    cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
	pcnet32_load_multicast(dev);

	if (pcnet32_init_ring(dev)) {
		rc = -ENOMEM;
		goto err_free_ring;
	}
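	/*
	 * From this point the chip may raise interrupts that are serviced
	 * by the NAPI poll loop, so NAPI must be enabled before the chip
	 * is (re)started below.
	 */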
	napi_enable(&lp->napi);

	/* Re-initialize the PCNET32, and start it when done. */
	lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
	lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));

	lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);

	netif_start_queue(dev);

	if (lp->chip_version >= PCNET32_79C970A) {
		/* Print the link status and start the watchdog */
		pcnet32_check_media(dev, 1);
		mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
	}

	i = 0;
	while (i++ < 100)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);

	netif_printk(lp, ifup, KERN_DEBUG, dev,
		     "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
		     i,
		     (u32) (lp->init_dma_addr),
		     lp->a->read_csr(ioaddr, CSR0));

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;	/* Always succeed */

err_free_ring:
	/* free any allocated skbuffs */
	pcnet32_purge_rx_ring(dev);

	/*
	 * Switch back to 16bit mode to avoid problems with dumb
	 * DOS packet driver after a warm reboot
	 */
	lp->a->write_bcr(ioaddr, 20, 4);

err_free_irq:
	spin_unlock_irqrestore(&lp->lock, flags);
	free_irq(dev->irq, dev);
	return rc;
}

/*
 * The LANCE has been halted for one reason or another (busmaster memory
 * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
 * etc.).  Modern LANCE variants always reload their ring-buffer
 * configuration when restarted, so we must reinitialize our ring context
 * before restarting.  As part of this reinitialization, find all packets
 * still on the Tx ring and pretend that they had been sent (in effect,
 * drop the packets on the floor) - the higher-level protocols will time
 * out and retransmit.  It'd be better to shuffle these skbs to a temp
 * list and then actually re-Tx them after restarting the chip, but I'm
 * too lazy to do so right now.  dplatt@3do.com
 */

static void pcnet32_purge_tx_ring(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < lp->tx_ring_size; i++) {
		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
		wmb();		/* Make sure adapter sees owner change */
		if (lp->tx_skbuff[i]) {
			if (!pci_dma_mapping_error(lp->pci_dev,
						   lp->tx_dma_addr[i]))
				pci_unmap_single(lp->pci_dev,
						 lp->tx_dma_addr[i],
						 lp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(lp->tx_skbuff[i]);
		}
		lp->tx_skbuff[i] = NULL;
		lp->tx_dma_addr[i] = 0;
	}
}
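/*
 * Descriptor ownership follows the usual LANCE convention: setting bit 15
 * of the status word (0x8000) hands a descriptor to the chip, and the CPU
 * owns it while that bit is clear.  The wmb()/rmb() barriers around the
 * status updates keep the other descriptor fields ordered against the
 * ownership change.
 */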
/* Initialize the PCNET32 Rx and Tx rings. */
static int pcnet32_init_ring(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int i;

	lp->tx_full = 0;
	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	for (i = 0; i < lp->rx_ring_size; i++) {
		struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
		if (rx_skbuff == NULL) {
			lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
			rx_skbuff = lp->rx_skbuff[i];
			if (!rx_skbuff) {
				/* there is not much we can do at this point */
				netif_err(lp, drv, dev,
					  "%s netdev_alloc_skb failed\n",
					  __func__);
				return -1;
			}
			skb_reserve(rx_skbuff, NET_IP_ALIGN);
		}

		rmb();
		if (lp->rx_dma_addr[i] == 0) {
			lp->rx_dma_addr[i] =
			    pci_map_single(lp->pci_dev, rx_skbuff->data,
					   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(lp->pci_dev,
						  lp->rx_dma_addr[i])) {
				/* there is not much we can do at this point */
				netif_err(lp, drv, dev,
					  "%s pci dma mapping error\n",
					  __func__);
				return -1;
			}
		}
		lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
		wmb();	/* Make sure owner changes after all others are visible */
		lp->rx_ring[i].status = cpu_to_le16(0x8000);
	}
	/* The Tx buffer address is filled in as needed, but we do need to
	 * clear the upper ownership bit.
	 */
	for (i = 0; i < lp->tx_ring_size; i++) {
		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
		wmb();	/* Make sure adapter sees owner change */
		lp->tx_ring[i].base = 0;
		lp->tx_dma_addr[i] = 0;
	}

	lp->init_block->tlen_rlen =
	    cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
	for (i = 0; i < 6; i++)
		lp->init_block->phys_addr[i] = dev->dev_addr[i];
	lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
	wmb();	/* Make sure all changes are visible */
	return 0;
}

/* the pcnet32 has been issued a stop or reset.  Wait for the stop bit
 * then flush the pending transmit operations, re-initialize the ring,
 * and tell the chip to initialize.
 */
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	int i;

	/* wait for stop */
	for (i = 0; i < 100; i++)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
			break;

	if (i >= 100)
		netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
			  __func__);

	pcnet32_purge_tx_ring(dev);
	if (pcnet32_init_ring(dev))
		return;

	/* ReInit Ring */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
	i = 0;
	while (i++ < 1000)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
			break;

	lp->a->write_csr(ioaddr, CSR0, csr0_bits);
}
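/*
 * Transmit watchdog: invoked by the networking core when the transmit
 * queue has been stalled for longer than dev->watchdog_timeo (5 * HZ,
 * set in the probe routine above).
 */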
static void pcnet32_tx_timeout(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr, flags;

	spin_lock_irqsave(&lp->lock, flags);
	/* Transmitter timeout, serious problems. */
	if (pcnet32_debug & NETIF_MSG_DRV)
		pr_err("%s: transmit timed out, status %4.4x, resetting\n",
		       dev->name, lp->a->read_csr(ioaddr, CSR0));
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
	dev->stats.tx_errors++;
	if (netif_msg_tx_err(lp)) {
		int i;
		printk(KERN_DEBUG
		       " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
		       lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
		       lp->cur_rx);
		for (i = 0; i < lp->rx_ring_size; i++)
			printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
			       le32_to_cpu(lp->rx_ring[i].base),
			       (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
			       0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
			       le16_to_cpu(lp->rx_ring[i].status));
		for (i = 0; i < lp->tx_ring_size; i++)
			printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
			       le32_to_cpu(lp->tx_ring[i].base),
			       (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
			       le32_to_cpu(lp->tx_ring[i].misc),
			       le16_to_cpu(lp->tx_ring[i].status));
		printk("\n");
	}
	pcnet32_restart(dev, CSR0_NORMAL);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_wake_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
}

static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	u16 status;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	netif_printk(lp, tx_queued, KERN_DEBUG, dev,
		     "%s() called, csr0 %4.4x\n",
		     __func__, lp->a->read_csr(ioaddr, CSR0));

	/* Default status -- will not enable Successful-TxDone
	 * interrupt when that option is available to us.
	 */
	status = 0x8300;

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & lp->tx_mod_mask;

	/* Caution: the write order is important here, set the status
	 * with the "ownership" bits last.
	 */

	lp->tx_ring[entry].length = cpu_to_le16(-skb->len);

	lp->tx_ring[entry].misc = 0x00000000;

	lp->tx_dma_addr[entry] =
	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		goto drop_packet;
	}
	lp->tx_skbuff[entry] = skb;
	lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
	wmb();	/* Make sure owner changes after all others are visible */
	lp->tx_ring[entry].status = cpu_to_le16(status);

	lp->cur_tx++;
	dev->stats.tx_bytes += skb->len;

	/* Trigger an immediate send poll. */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);

	if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
		lp->tx_full = 1;
		netif_stop_queue(dev);
	}
drop_packet:
	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}
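/*
 * Interrupt handling is split with NAPI: the hard handler below only
 * acknowledges and classifies events, then masks further interrupts via
 * CSR3 and schedules pcnet32_poll() to do the actual Rx/Tx ring work.
 */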
/* The PCNET32 interrupt handler. */
static irqreturn_t
pcnet32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pcnet32_private *lp;
	unsigned long ioaddr;
	u16 csr0;
	int boguscnt = max_interrupt_work;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	spin_lock(&lp->lock);

	csr0 = lp->a->read_csr(ioaddr, CSR0);
	while ((csr0 & 0x8f00) && --boguscnt >= 0) {
		if (csr0 == 0xffff)
			break;	/* PCMCIA remove happened */
		/* Acknowledge all of the current interrupt sources ASAP. */
		lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);

		netif_printk(lp, intr, KERN_DEBUG, dev,
			     "interrupt csr0=%#2.2x new csr=%#2.2x\n",
			     csr0, lp->a->read_csr(ioaddr, CSR0));

		/* Log misc errors. */
		if (csr0 & 0x4000)
			dev->stats.tx_errors++;	/* Tx babble. */
		if (csr0 & 0x1000) {
			/*
			 * This happens when our receive ring is full.  This
			 * shouldn't be a problem as we will see normal rx
			 * interrupts for the frames in the receive ring.  But
			 * there are some PCI chipsets (I can reproduce this
			 * on SP3G with Intel saturn chipset) which sometimes
			 * have problems and will fill up the receive ring
			 * with error descriptors.  In this situation we
			 * don't get a rx interrupt, but a missed frame
			 * interrupt sooner or later.
			 */
			dev->stats.rx_errors++;	/* Missed a Rx frame. */
		}
		if (csr0 & 0x0800) {
			netif_err(lp, drv, dev,
				  "Bus master arbitration failure, status %4.4x\n",
				  csr0);
			/* unlike for the lance, there is no restart needed */
		}
		if (napi_schedule_prep(&lp->napi)) {
			u16 val;
			/* set interrupt masks */
			val = lp->a->read_csr(ioaddr, CSR3);
			val |= 0x5f00;
			lp->a->write_csr(ioaddr, CSR3, val);

			__napi_schedule(&lp->napi);
			break;
		}
		csr0 = lp->a->read_csr(ioaddr, CSR0);
	}

	netif_printk(lp, intr, KERN_DEBUG, dev,
		     "exiting interrupt, csr0=%#4.4x\n",
		     lp->a->read_csr(ioaddr, CSR0));

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}
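/*
 * Close (.ndo_stop): quiesce the watchdog timer and NAPI first, then stop
 * the chip under the lock, release the IRQ, and purge both rings.
 */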
static int pcnet32_close(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;

	del_timer_sync(&lp->watchdog_timer);

	netif_stop_queue(dev);
	napi_disable(&lp->napi);

	spin_lock_irqsave(&lp->lock, flags);

	dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);

	netif_printk(lp, ifdown, KERN_DEBUG, dev,
		     "Shutting down ethercard, status was %2.2x\n",
		     lp->a->read_csr(ioaddr, CSR0));

	/* We stop the PCNET32 here -- it occasionally polls memory if we
	 * don't.
	 */
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);

	/*
	 * Switch back to 16bit mode to avoid problems with dumb
	 * DOS packet driver after a warm reboot
	 */
	lp->a->write_bcr(ioaddr, 20, 4);

	spin_unlock_irqrestore(&lp->lock, flags);

	free_irq(dev->irq, dev);

	spin_lock_irqsave(&lp->lock, flags);

	pcnet32_purge_rx_ring(dev);
	pcnet32_purge_tx_ring(dev);

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;
}

static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
	spin_unlock_irqrestore(&lp->lock, flags);

	return &dev->stats;
}

/* taken from the sunlance driver, which it took from the depca driver */
static void pcnet32_load_multicast(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	volatile struct pcnet32_init_block *ib = lp->init_block;
	volatile __le16 *mcast_table = (__le16 *)ib->filter;
	struct netdev_hw_addr *ha;
	unsigned long ioaddr = dev->base_addr;
	int i;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = cpu_to_le32(~0U);
		ib->filter[1] = cpu_to_le32(~0U);
		lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
		lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
		lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
		lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
	}
	for (i = 0; i < 4; i++)
		lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
				 le16_to_cpu(mcast_table[i]));
}
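/*
 * The logical-address filter written above is the standard LANCE 64-bit
 * multicast hash: the top six bits of the little-endian CRC-32 of each
 * address select one of 64 filter bits, kept as four 16-bit words.
 */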
/*
 * Set or clear the multicast filter for this adaptor.
 */
static void pcnet32_set_multicast_list(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr, flags;
	struct pcnet32_private *lp = netdev_priv(dev);
	int csr15, suspended;

	spin_lock_irqsave(&lp->lock, flags);
	suspended = pcnet32_suspend(dev, &flags, 0);
	csr15 = lp->a->read_csr(ioaddr, CSR15);
	if (dev->flags & IFF_PROMISC) {
		/* Log any net taps. */
		netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
		lp->init_block->mode =
		    cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
				7);
		lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
	} else {
		lp->init_block->mode =
		    cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
		lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
		pcnet32_load_multicast(dev);
	}

	if (suspended) {
		int csr5;
		/* clear SUSPEND (SPND) - CSR5 bit 0 */
		csr5 = lp->a->read_csr(ioaddr, CSR5);
		lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
	} else {
		lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
		pcnet32_restart(dev, CSR0_NORMAL);
		netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}

/* This routine assumes that the lp->lock is held */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	u16 val_out;

	if (!lp->mii)
		return 0;

	lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
	val_out = lp->a->read_bcr(ioaddr, 34);

	return val_out;
}

/* This routine assumes that the lp->lock is held */
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	if (!lp->mii)
		return;

	lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
	lp->a->write_bcr(ioaddr, 34, val);
}

static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	/* SIOC[GS]MIIxxx ioctls */
	if (lp->mii) {
		spin_lock_irqsave(&lp->lock, flags);
		rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
		spin_unlock_irqrestore(&lp->lock, flags);
	} else {
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int pcnet32_check_otherphy(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	struct mii_if_info mii = lp->mii_if;
	u16 bmcr;
	int i;

	for (i = 0; i < PCNET32_MAX_PHYS; i++) {
		if (i == lp->mii_if.phy_id)
			continue;	/* skip active phy */
		if (lp->phymask & (1 << i)) {
			mii.phy_id = i;
			if (mii_link_ok(&mii)) {
				/* found PHY with active link */
				netif_info(lp, link, dev,
					   "Using PHY number %d\n", i);

				/* isolate inactive phy */
				bmcr = mdio_read(dev, lp->mii_if.phy_id,
						 MII_BMCR);
				mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
					   bmcr | BMCR_ISOLATE);

				/* de-isolate new phy */
				bmcr = mdio_read(dev, i, MII_BMCR);
				mdio_write(dev, i, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);

				/* set new phy address */
				lp->mii_if.phy_id = i;
				return 1;
			}
		}
	}
	return 0;
}
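/*
 * Link supervision: pcnet32_watchdog below re-arms itself on every run
 * (every two seconds, per PCNET32_WATCHDOG_TIMEOUT) and calls
 * pcnet32_check_media(), which falls back to pcnet32_check_otherphy()
 * when the active PHY on a multi-PHY board has lost link.
 */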
/*
 * Show the status of the media.  Similar to mii_check_media however it
 * correctly shows the link speed for all (tested) pcnet32 variants.
 * Devices with no mii just report link state without speed.
 *
 * Caller is assumed to hold and release the lp->lock.
 */
static void pcnet32_check_media(struct net_device *dev, int verbose)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int curr_link;
	int prev_link = netif_carrier_ok(dev) ? 1 : 0;
	u32 bcr9;

	if (lp->mii) {
		curr_link = mii_link_ok(&lp->mii_if);
	} else {
		ulong ioaddr = dev->base_addr;	/* card base I/O address */
		curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
	}
	if (!curr_link) {
		if (prev_link || verbose) {
			netif_carrier_off(dev);
			netif_info(lp, link, dev, "link down\n");
		}
		if (lp->phycount > 1) {
			curr_link = pcnet32_check_otherphy(dev);
			prev_link = 0;
		}
	} else if (verbose || !prev_link) {
		netif_carrier_on(dev);
		if (lp->mii) {
			if (netif_msg_link(lp)) {
				struct ethtool_cmd ecmd = {
					.cmd = ETHTOOL_GSET };
				mii_ethtool_gset(&lp->mii_if, &ecmd);
				netdev_info(dev, "link up, %uMbps, %s-duplex\n",
					    ethtool_cmd_speed(&ecmd),
					    (ecmd.duplex == DUPLEX_FULL)
					    ? "full" : "half");
			}
			bcr9 = lp->a->read_bcr(dev->base_addr, 9);
			if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
				if (lp->mii_if.full_duplex)
					bcr9 |= (1 << 0);
				else
					bcr9 &= ~(1 << 0);
				lp->a->write_bcr(dev->base_addr, 9, bcr9);
			}
		} else {
			netif_info(lp, link, dev, "link up\n");
		}
	}
}

/*
 * Check for loss of link and link establishment.
 * Cannot use mii_check_media because it does nothing if mode is forced.
 */
static void pcnet32_watchdog(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;

	/* Print the link status if it has changed */
	spin_lock_irqsave(&lp->lock, flags);
	pcnet32_check_media(dev, 0);
	spin_unlock_irqrestore(&lp->lock, flags);

	mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
}

static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		pcnet32_close(dev);
	}
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int pcnet32_pm_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		pcnet32_open(dev);
		netif_device_attach(dev);
	}
	return 0;
}

static void pcnet32_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct pcnet32_private *lp = netdev_priv(dev);

		unregister_netdev(dev);
		pcnet32_free_ring(dev);
		release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
		pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
				    lp->init_block, lp->init_dma_addr);
		free_netdev(dev);
		pci_disable_device(pdev);
	}
}

static struct pci_driver pcnet32_driver = {
	.name = DRV_NAME,
	.probe = pcnet32_probe_pci,
	.remove = pcnet32_remove_one,
	.id_table = pcnet32_pci_tbl,
	.suspend = pcnet32_pm_suspend,
	.resume = pcnet32_pm_resume,
};
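/*
 * Module parameters.  The per-card arrays (options[], full_duplex[] and
 * homepna[]) are indexed in probe order, one entry per adapter, up to
 * MAX_UNITS entries.
 */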
/* An additional parameter that may be passed in... */
static int debug = -1;
static int tx_start_pt = -1;
static int pcnet32_have_pci;

module_param(debug, int, 0);
MODULE_PARM_DESC(debug, DRV_NAME " debug level");
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work,
		 DRV_NAME " maximum events handled per interrupt");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak,
		 DRV_NAME " copy breakpoint for copy-only-tiny-frames");
module_param(tx_start_pt, int, 0);
MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
module_param(pcnet32vlb, int, 0);
MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
module_param_array(options, int, NULL, 0);
MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
module_param_array(homepna, int, NULL, 0);
MODULE_PARM_DESC(homepna,
		 DRV_NAME
		 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");

MODULE_AUTHOR("Thomas Bogendoerfer");
MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
MODULE_LICENSE("GPL");

#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int __init pcnet32_init_module(void)
{
	pr_info("%s", version);

	pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);

	if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
		tx_start = tx_start_pt;

	/* find the PCI devices */
	if (!pci_register_driver(&pcnet32_driver))
		pcnet32_have_pci = 1;

	/* should we find any remaining VLbus devices ? */
	if (pcnet32vlb)
		pcnet32_probe_vlbus(pcnet32_portlist);

	if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
		pr_info("%d cards_found\n", cards_found);

	return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
}

static void __exit pcnet32_cleanup_module(void)
{
	struct net_device *next_dev;

	while (pcnet32_dev) {
		struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
		next_dev = lp->next;
		unregister_netdev(pcnet32_dev);
		pcnet32_free_ring(pcnet32_dev);
		release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
		pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
				    lp->init_block, lp->init_dma_addr);
		free_netdev(pcnet32_dev);
		pcnet32_dev = next_dev;
	}

	if (pcnet32_have_pci)
		pci_unregister_driver(&pcnet32_driver);
}

module_init(pcnet32_init_module);
module_exit(pcnet32_cleanup_module);

/*
 * Local variables:
 *  c-indent-level: 4
 *  tab-width: 8
 * End:
 */