/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng.<edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/

#define DRV_NAME	"DL2000/TC902x-based linux driver"
#define DRV_VERSION	"v1.19"
#define DRV_RELDATE	"2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>

#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

static char version[] =
      KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow=-1;
static int rx_flow=-1;
static int copy_thresh;
static int rx_coalesce=10;	/* Rx frame count each interrupt */
static int rx_timeout=200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce=16;	/* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0);	/* HW xmit count each TxDMAComplete */
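/* A minimal usage sketch (hypothetical values): the array parameters take
 * one comma-separated entry per adapter, e.g.
 *
 *	modprobe dl2k media=auto,100mbps_fd jumbo=0,1 vlan=0,100 tx_coalesce=16
 *
 * which would leave the first card autosensing and force the second to
 * 100Mbps full duplex with jumbo frames and VLAN id 100.  See
 * Documentation/networking/dl2k.txt for the authoritative parameter list.
 */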
/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
       UpdateStats | LinkEvent)

static void dl2k_enable_int(struct netdev_private *np)
{
	void __iomem *ioaddr = np->ioaddr;

	dw16(IntEnable, DEFAULT_INTR);
}

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (struct netdev_private *, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
		      u16 data);

static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
	.ndo_open		= rio_open,
	.ndo_start_xmit		= start_xmit,
	.ndo_stop		= rio_close,
	.ndo_get_stats		= get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= set_multicast,
	.ndo_do_ioctl		= rio_ioctl,
	.ndo_tx_timeout		= rio_tx_timeout,
};

static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	np->eeprom_addr = ioaddr;
#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
				   strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				   strcmp (media[card_idx], "6") == 0) {
				np->speed = 1000;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				   strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	/* MTU range: 68 - 1536 or 8000 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE;

	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;
	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}

static int
find_miiphy (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i, phy_found = 0;

	np->phy_addr = 1;

	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read (dev, i, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phy_addr = i;
			phy_found++;
		}
	}
	if (!phy_found) {
		printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
		return -ENODEV;
	}
	return 0;
}

static int
parse_eeprom (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i, j;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;

	int cid, next;

	for (i = 0; i < 128; i++)
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));

	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != cpu_to_le32(crc)) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
				dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	if (np->chip_id == CHIP_IP1000A) {
		np->led_mode = psrom->led_mode;
		return 0;
	}

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}
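	/* The Software Information Block walked below is a chain of cells
	 * starting at offset 0x30 of the SROM image.  As far as this parser
	 * is concerned, each cell is laid out as
	 *
	 *	byte 0:  cell id (cid)
	 *	byte 1:  offset of the next cell within sromdata[]
	 *	byte 2+: cell payload (duplex polarity, wake polarity,
	 *	         adapter description string, ...)
	 *
	 * cid 1 terminates the chain; an all-zero or all-0xff id/offset
	 * pair is treated as corrupt data.
	 */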
	/* Parse Software Information Block */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}

static void rio_set_led_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 mode;

	if (np->chip_id != CHIP_IP1000A)
		return;

	mode = dr32(ASICCtrl);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if (np->led_mode & 0x01)
		mode |= IPG_AC_LED_MODE;
	if (np->led_mode & 0x02)
		mode |= IPG_AC_LED_MODE_BIT_1;
	if (np->led_mode & 0x08)
		mode |= IPG_AC_LED_SPEED;

	dw32(ASICCtrl, mode);
}

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}

static void free_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	/* Free all the skbuffs in the queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
					 skb->len, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}
}

static void rio_reset_ring(struct netdev_private *np)
{
	int i;

	np->cur_rx = 0;
	np->cur_tx = 0;
	np->old_rx = 0;
	np->old_tx = 0;

	for (i = 0; i < TX_RING_SIZE; i++)
		np->tx_ring[i].status = cpu_to_le64(TFDDone);

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].status = 0;
}

/* allocate and initialize Tx and Rx descriptors */
static int alloc_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	rio_reset_ring(np);
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
					      ((i + 1) % TX_RING_SIZE) *
					      sizeof(struct netdev_desc));
	}

	/* Initialize Rx descriptors & allocate buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (!skb) {
			free_list(dev);
			return -ENOMEM;
		}

		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof(struct netdev_desc));
		/* Rubicon supports 48 bits of addressing space. */
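		/* Layout note (derived from desc_to_dma() and the shift
		 * below): fraginfo is a single little-endian 64-bit word,
		 *
		 *	bits 47..0  - buffer DMA address
		 *	bits 63..48 - fragment/buffer length
		 *
		 * so the mapping and the length are packed together here.
		 */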
		np->rx_ring[i].fraginfo =
		    cpu_to_le64(pci_map_single(
			np->pdev, skb->data, np->rx_buf_sz,
			PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	return 0;
}

static void rio_hw_init(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i;
	u16 macctrl;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2,
	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	rio_set_led_mode(dev);

	/* DebugCtrl bit 4, 5, 9 must set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	if (np->chip_id == CHIP_IP1000A &&
	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
		/* PHY magic taken from ipg driver, undocumented registers */
		mii_write(dev, np->phy_addr, 31, 0x0001);
		mii_write(dev, np->phy_addr, 27, 0x01e0);
		mii_write(dev, np->phy_addr, 31, 0x0002);
		mii_write(dev, np->phy_addr, 27, 0xeb8e);
		mii_write(dev, np->phy_addr, 31, 0x0000);
		mii_write(dev, np->phy_addr, 30, 0x005e);
		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
	}

	if (np->phy_media)
		mii_set_media_pcs(dev);
	else
		mii_set_media(dev);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO+14);

	/* Set RFDListPtr */
	dw32(RFDListPtr0, np->rx_ring_dma);
	dw32(RFDListPtr1, 0);

	/* Set station address */
	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
	 */
	for (i = 0; i < 3; i++)
		dw16(StationAddr0 + 2 * i,
		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information is tagged through the TFC's VID and
		   CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);
}

static void rio_hw_stop(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	/* Disable interrupts */
	dw16(IntEnable, 0);

	/* Stop Tx and Rx logics */
	dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
}

static int rio_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pdev->irq;
	int i;

	i = alloc_list(dev);
	if (i)
		return i;

	rio_hw_init(dev);

	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i) {
		rio_hw_stop(dev);
		free_list(dev);
		return i;
	}

	setup_timer(&np->timer, rio_timer, (unsigned long)dev);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer(&np->timer);

	netif_start_queue (dev);

	dl2k_enable_int(np);
	return 0;
}

static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = netdev_alloc_skb_ip_align(dev,
								np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
						 (np->pdev, skb->data, np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, dr32(TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	netif_trans_update(dev); /* prevent tx timeout */
}

static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct netdev_desc *txdesc;
	unsigned entry;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag = VLANTagInsert |
		    ((u64)np->vlan << 32) |
		    ((u64)skb->priority << 45);
	}
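	/* TFD packing (as used below): fraginfo carries the buffer DMA
	 * address in bits 47..0 and the fragment length in bits 63..48;
	 * the status word carries the frame id (ring entry), the VLAN
	 * insert fields set up above (VID at bit 32, user priority at
	 * bit 45), the fragment count and the control bits.
	 */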
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
	/* Schedule ISR */
	dw32(CountDown, 10000);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (!dr32(TFDListPtr0)) {
		dw32(TFDListPtr0, np->tx_ring_dma +
		     entry * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);
	}

	return NETDEV_TX_OK;
}

static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	unsigned int_status;
	int cnt = max_intrloop;
	int handled = 0;

	while (1) {
		int_status = dr16(IntStatus);
		dw16(IntStatus, int_status);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = dr32(TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		dw32(CountDown, 100);
	return IRQ_RETVAL(handled);
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	unsigned long flag = 0;

	if (irq)
		spin_lock(&np->tx_lock);
	else
		spin_lock_irqsave(&np->tx_lock, flag);

	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  desc_to_dma(&np->tx_ring[entry]),
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_kfree_skb_irq (skb);
		else
			dev_kfree_skb (skb);

		np->tx_skbuff[entry] = NULL;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock(&np->tx_lock);
	else
		spin_unlock_irqrestore(&np->tx_lock, flag);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */

	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}
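/* TxStatus bits as decoded by tx_error() below (frame id in bits 31..16):
 *	0x01 - a transmit error is pending
 *	0x04 - late collision
 *	0x08 - maximum collisions reached
 *	0x10 - transmit underrun
 */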
static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int frame_id;
	int i;

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
		/* Transmit Underrun needs TxReset, DMAReset and FIFOReset */
		dw16(ASICCtrl + 2,
		     TxReset | DMAReset | FIFOReset | NetworkReset);
		/* Wait for ResetBusy bit clear */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		dw32(TFDListPtr0, np->tx_ring_dma +
		     np->old_tx * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		dw16(ASICCtrl + 2, TxReset | FIFOReset);
		/* Wait reset done */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
	/* Restart the Tx */
	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}

static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						 np->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
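			/* Copy-break: frames up to copy_thresh bytes were
			 * copied into a fresh skb above, so the original
			 * receive buffer stays mapped and can be handed
			 * straight back to the ring; larger frames give up
			 * their buffer and a new one is allocated below.
			 */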
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Checksum done by hw, but csum value unavailable. */
			if (np->pdev->pci_rev_id >= 0x0c &&
			    !(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}

static void
rio_error (struct net_device *dev, int int_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u16 macctrl;

	/* Link change event */
	if (int_status & LinkEvent) {
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			if (np->speed == 1000)
				np->tx_coalesce = tx_coalesce;
			else
				np->tx_coalesce = 1;
			macctrl = 0;
			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ?
				TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ?
				RxFlowControlEnable : 0;
			dw16(MACCtrl, macctrl);
			np->link_status = 1;
			netif_carrier_on(dev);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
			np->link_status = 0;
			netif_carrier_off(dev);
		}
	}

	/* UpdateStats statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}
	/* PCI Error: a catastrophic error related to the bus interface
	   occurred; set GlobalReset and HostReset to reset. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		dw16(ASICCtrl + 2, GlobalReset | HostReset);
		mdelay (500);
		rio_set_led_mode(dev);
	}
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif
	unsigned int stat_reg;

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */

	np->stats.rx_packets += dr32(FramesRcvOk);
	np->stats.tx_packets += dr32(FramesXmtOk);
	np->stats.rx_bytes += dr32(OctetRcvOk);
	np->stats.tx_bytes += dr32(OctetXmtOk);

	np->stats.multicast = dr32(McstFramesRcvdOk);
	np->stats.collisions += dr32(SingleColFrames)
			     +  dr32(MultiColFrames);

	/* detailed tx errors */
	stat_reg = dr16(FramesAbortXSColls);
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	stat_reg = dr16(CarrierSenseErrors);
	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	/* Clear all other statistic register. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr32(LateCollisions);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);

#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return &np->stats;
}

static int
clear_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */
	dr32(FramesRcvOk);
	dr32(FramesXmtOk);
	dr32(OctetRcvOk);
	dr32(OctetXmtOk);

	dr32(McstFramesRcvdOk);
	dr32(SingleColFrames);
	dr32(MultiColFrames);
	dr32(LateCollisions);
	/* detailed rx errors */
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);

	/* detailed tx errors */
	dr16(FramesAbortXSColls);
	dr16(CarrierSenseErrors);
	/* Clear all other statistic register. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);
#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return 0;
}

static void
set_multicast (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 hash_table[2];
	u16 rx_mode = 0;

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
	hash_table[1] |= 0x02000000;
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(netdev_mc_count(dev) > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		/* Receive broadcast frames and multicast frames filtering
		   by Hashtable */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		netdev_for_each_mc_addr(ha, dev) {
			int bit, index = 0;
			int crc = ether_crc_le(ETH_ALEN, ha->addr);
			/* The bit-reversed six most significant bits of the
			   CRC are used as the hash-table index */
			for (bit = 0; bit < 6; bit++)
				if (crc & (1 << (31 - bit)))
					index |= (1 << bit);
			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	dw32(HashTable0, hash_table[0]);
	dw32(HashTable1, hash_table[1]);
	dw16(ReceiveMode, rx_mode);
}
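/* The ethtool_ops below back the standard ethtool interface; illustrative
 * invocations (assuming the interface is named eth0):
 *
 *	ethtool eth0                                       # rio_get_link_ksettings
 *	ethtool -s eth0 speed 100 duplex full autoneg off  # rio_set_link_ksettings
 */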
static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, "dl2k", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int rio_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 supported, advertising;

	if (np->phy_media) {
		/* fiber device */
		supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
		advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	} else {
		/* copper device */
		supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
			| SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
			SUPPORTED_Autoneg | SUPPORTED_MII;
		advertising = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
			ADVERTISED_Autoneg | ADVERTISED_MII;
		cmd->base.port = PORT_MII;
	}
	if (np->link_status) {
		cmd->base.speed = np->speed;
		cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	if (np->an_enable)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

	cmd->base.phy_address = np->phy_addr;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int rio_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u8 duplex = cmd->base.duplex;

	netif_carrier_off(dev);
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		if (np->an_enable) {
			return 0;
		} else {
			np->an_enable = 1;
			mii_set_media(dev);
			return 0;
		}
	} else {
		np->an_enable = 0;
		if (np->speed == 1000) {
			speed = SPEED_100;
			duplex = DUPLEX_FULL;
			printk("Warning: Auto-Negotiation cannot be disabled at 1000Mbps; falling back to manual 100Mbps, full duplex.\n");
		}
		switch (speed) {
		case SPEED_10:
			np->speed = 10;
			np->full_duplex = (duplex == DUPLEX_FULL);
			break;
		case SPEED_100:
			np->speed = 100;
			np->full_duplex = (duplex == DUPLEX_FULL);
			break;
		case SPEED_1000: /* not supported */
		default:
			return -EINVAL;
		}
		mii_set_media(dev);
	}
	return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = rio_get_drvinfo,
	.get_link = rio_get_link,
	.get_link_ksettings = rio_get_link_ksettings,
	.set_link_ksettings = rio_set_link_ksettings,
};

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *miidata = if_mii(rq);

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = phy_addr;
		break;
	case SIOCGMIIREG:
		miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to read/write the eeprom to avoid failures on
   some machines */
static int read_eeprom(struct netdev_private *np, int eep_addr)
{
	void __iomem *ioaddr = np->eeprom_addr;
	int i = 1000;

	dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
	while (i-- > 0) {
		if (!(dr16(EepromCtrl) & EEP_BUSY))
			return dr16(EepromData);
	}
	return 0;
}

enum phy_ctrl_bits {
	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
	MII_DUPLEX = 0x08,
};

#define mii_delay() dr8(PhyCtrl)
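/* The MDIO bus is bit-banged through the PhyCtrl register: MII_DATA1
 * drives the MDIO data pin, MII_WRITE selects output mode (MII_READ
 * tristates the pin so the PHY can drive it), and each bit is clocked
 * by toggling MII_CLK low-to-high.  mii_delay() is just a dummy read
 * of PhyCtrl used to pace the clock.
 */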
static void
mii_sendbit (struct net_device *dev, u32 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
	dw8(PhyCtrl, data);
	mii_delay ();
	dw8(PhyCtrl, data | MII_CLK);
	mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u8 data;

	data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
	dw8(PhyCtrl, data);
	mii_delay ();
	dw8(PhyCtrl, data | MII_CLK);
	mii_delay ();
	return (dr8(PhyCtrl) >> 1) & 1;
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		mii_sendbit (dev, data & (1 << i));
	}
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		goto err_out;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);
		retval <<= 1;
	}
	/* End cycle */
	mii_getbit (dev);
	return (retval >> 1) & 0xffff;

err_out:
	return 0;
}

static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
	u32 cmd;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
	return 0;
}
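/* Worked example of the read framing above (illustrative only): fetching
 * BMSR (register 1) from a PHY at address 1 sends the 14-bit header
 *
 *	cmd = 0x06 << 10 | 1 << 5 | 1 = 0x1821
 *	    = 01 10 00001 00001 (ST OP PHYAD REGAD)
 *
 * then samples one turnaround bit (the PHY pulls it low) and the 16 data
 * bits, MSB first.
 */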
static int
mii_wait_link (struct net_device *dev, int wait)
{
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	do {
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		if (bmsr & BMSR_LSTATUS)
			return 0;
		mdelay (1);
	} while (--wait > 0);
	return -1;
}

static int
mii_get_media (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	__u16 mscr;
	__u16 mssr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, MII_BMSR);
	if (np->an_enable) {
		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
			mii_read (dev, phy_addr, MII_LPA);
		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
		mssr = mii_read (dev, phy_addr, MII_STAT1000);
		if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
			np->speed = 1000;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
		} else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
			np->speed = 1000;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
		} else if (negotiate & ADVERTISE_100FULL) {
			np->speed = 100;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
		} else if (negotiate & ADVERTISE_100HALF) {
			np->speed = 100;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
		} else if (negotiate & ADVERTISE_10FULL) {
			np->speed = 10;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
		} else if (negotiate & ADVERTISE_10HALF) {
			np->speed = 10;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
		}
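		/* Resolve flow control from the negotiated pause bits:
		 * symmetric pause keeps whatever the user selected for both
		 * directions; an asymmetric-only link partner disables Tx
		 * pause while leaving Rx pause to the user's choice.
		 */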
		if (negotiate & ADVERTISE_PAUSE_CAP) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & ADVERTISE_PAUSE_ASYM) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
		switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
		case BMCR_SPEED1000:
			printk (KERN_INFO "Operating at 1000 Mbps, ");
			break;
		case BMCR_SPEED100:
			printk (KERN_INFO "Operating at 100 Mbps, ");
			break;
		case 0:
			printk (KERN_INFO "Operating at 10 Mbps, ");
		}
		if (bmcr & BMCR_FULLDPLX) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media (struct net_device *dev)
{
	__u16 pscr;
	__u16 bmcr;
	__u16 bmsr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
			~(ADVERTISE_100FULL | ADVERTISE_10FULL |
			  ADVERTISE_100HALF | ADVERTISE_10HALF |
			  ADVERTISE_100BASE4);
		if (bmsr & BMSR_100FULL)
			anar |= ADVERTISE_100FULL;
		if (bmsr & BMSR_100HALF)
			anar |= ADVERTISE_100HALF;
		if (bmsr & BMSR_100BASE4)
			anar |= ADVERTISE_100BASE4;
		if (bmsr & BMSR_10FULL)
			anar |= ADVERTISE_10FULL;
		if (bmsr & BMSR_10HALF)
			anar |= ADVERTISE_10HALF;
		anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mii_write (dev, phy_addr, MII_ADVERTISE, anar);

		/* Enable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr |= 3 << 5;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr &= ~(3 << 5);
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* 2) PHY Reset */
		bmcr = mii_read (dev, phy_addr, MII_BMCR);
		bmcr |= BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);

		/* 3) Power Down */
		bmcr = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay (100);	/* wait a certain time */

		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ADVERTISE, 0);

		/* 5) Set media and Power Up */
		bmcr = BMCR_PDOWN;
		if (np->speed == 100) {
			bmcr |= BMCR_SPEED100;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr |= BMCR_FULLDPLX;
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
#if 0
		/* Set 1000BaseT Master/Slave setting */
		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
		mscr |= MII_MSCR_CFG_ENABLE;
		mscr &= ~MII_MSCR_CFG_VALUE;
#endif
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
	}
	return 0;
}

static int
mii_get_media_pcs (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, PCS_BMSR);
	if (np->an_enable) {
		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
			mii_read (dev, phy_addr, PCS_ANLPAR);
		np->speed = 1000;
		if (negotiate & PCS_ANAR_FULL_DUPLEX) {
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
			np->full_duplex = 1;
		} else {
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
			np->full_duplex = 0;
		}
		if (negotiate & PCS_ANAR_PAUSE) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & PCS_ANAR_ASYMMETRIC) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
		printk (KERN_INFO "Operating at 1000 Mbps, ");
		if (bmcr & BMCR_FULLDPLX) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media_pcs (struct net_device *dev)
{
	__u16 bmcr;
	__u16 esr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Auto-Negotiation? */
	if (np->an_enable) {
		/* Advertise capabilities */
		esr = mii_read (dev, phy_addr, PCS_ESR);
		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
			~PCS_ANAR_HALF_DUPLEX &
			~PCS_ANAR_FULL_DUPLEX;
		if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
			anar |= PCS_ANAR_HALF_DUPLEX;
		if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
			anar |= PCS_ANAR_FULL_DUPLEX;
		anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
		mii_write (dev, phy_addr, MII_ADVERTISE, anar);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* PHY Reset */
		bmcr = BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
		if (np->full_duplex) {
			bmcr = BMCR_FULLDPLX;
			printk (KERN_INFO "Manual full duplex\n");
		} else {
			bmcr = 0;
			printk (KERN_INFO "Manual half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);

		/* Advertise nothing */
		mii_write (dev, phy_addr, MII_ADVERTISE, 0);
	}
	return 0;
}


static int
rio_close (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct pci_dev *pdev = np->pdev;

	netif_stop_queue (dev);

	rio_hw_stop(dev);

	free_irq(pdev->irq, dev);
	del_timer_sync (&np->timer);

	free_list(dev);

	return 0;
}

static void
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		pci_iounmap(pdev, np->ioaddr);
#endif
		pci_iounmap(pdev, np->eeprom_addr);
		free_netdev (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rio_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct netdev_private *np = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	del_timer_sync(&np->timer);
	rio_hw_stop(dev);

	return 0;
}

static int rio_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct netdev_private *np = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rio_reset_ring(np);
	rio_hw_init(dev);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer(&np->timer);
	netif_device_attach(dev);
	dl2k_enable_int(np);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS	(&rio_pm_ops)

#else

#define RIO_PM_OPS	NULL

#endif /* CONFIG_PM_SLEEP */

static struct pci_driver rio_driver = {
	.name		= "dl2k",
	.id_table	= rio_pci_tbl,
	.probe		= rio_probe1,
	.remove		= rio_remove1,
	.driver.pm	= RIO_PM_OPS,
};

module_pci_driver(rio_driver);
/*

Compile command:

gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

Read Documentation/networking/dl2k.txt for details.

*/