/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng.<edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/

#define DRV_NAME	"DL2000/TC902x-based linux driver"
#define DRV_VERSION	"v1.19"
#define DRV_RELDATE	"2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>

#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

static char version[] =
	KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow=-1;
static int rx_flow=-1;
static int copy_thresh;
static int rx_coalesce=10;	/* Rx frame count each interrupt */
static int rx_timeout=200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce=16;	/* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0);	/* HW xmit count each TxDMAComplete */


/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
       UpdateStats | LinkEvent)

static void dl2k_enable_int(struct netdev_private *np)
{
	void __iomem *ioaddr = np->ioaddr;

	dw16(IntEnable, DEFAULT_INTR);
}
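
/*
 * Note on the register helpers above: dw8/dw16/dw32 and dr8/dr16/dr32
 * expand against a local 'ioaddr' variable, so every function using them
 * must declare one first, e.g.
 *
 *	void __iomem *ioaddr = np->ioaddr;
 *	dw16(IntEnable, DEFAULT_INTR);
 *		which expands to iowrite16(DEFAULT_INTR, ioaddr + IntEnable);
 *
 * read_eeprom() instead points 'ioaddr' at np->eeprom_addr, the I/O
 * window mapped from BAR 0 in rio_probe1().
 */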

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (struct netdev_private *, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
		      u16 data);

static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
	.ndo_open		= rio_open,
	.ndo_start_xmit		= start_xmit,
	.ndo_stop		= rio_close,
	.ndo_get_stats		= get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= set_multicast,
	.ndo_do_ioctl		= rio_ioctl,
	.ndo_tx_timeout		= rio_tx_timeout,
	.ndo_change_mtu		= change_mtu,
};

static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
				   strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				   strcmp (media[card_idx], "6") == 0) {
				np->speed = 1000;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				   strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
		       tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
		       np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}
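
/*
 * Illustration (not part of the original source): the per-card options
 * above are comma-separated module parameters, so two adapters could be
 * loaded with, say,
 *
 *	modprobe dl2k media=100mbps_fd,auto jumbo=0,1 vlan=0,2
 *
 * forcing card 0 to 100 Mbps full duplex while card 1 autonegotiates
 * with jumbo frames and VLAN id 2.  The accepted media strings are
 * exactly those compared in rio_probe1() above; see
 * Documentation/networking/dl2k.txt for the full list.
 */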

static int
find_miiphy (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i, phy_found = 0;

	np->phy_addr = 1;

	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read (dev, i, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phy_addr = i;
			phy_found++;
		}
	}
	if (!phy_found) {
		printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
		return -ENODEV;
	}
	return 0;
}

static int
parse_eeprom (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i, j;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;

	int cid, next;

	for (i = 0; i < 128; i++)
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));

	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != cpu_to_le32(crc)) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
				dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	if (np->chip_id == CHIP_IP1000A) {
		np->led_mode = psrom->led_mode;
		return 0;
	}

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}

	/* Parse Software Information Block */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}
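
/*
 * SROM layout as consumed by parse_eeprom() above (a summary; field
 * names assume the PSROM_t definition in dl2k.h): the 256-byte EEPROM
 * is read as 128 little-endian words, and on D-Link boards the last
 * 4 bytes hold a CRC-32 over the first 252.  The MAC address lives in
 * psrom->mac_addr.  From offset 0x30 a Software Information Block is
 * stored as a chain of cells, each starting with a cell id and the
 * offset of the next cell:
 *
 *	cid 0 - format version		cid 2 - duplex polarity
 *	cid 1 - end of chain		cid 3 - wake polarity
 *	cid 9 - adapter name		cid 4..8 - reserved
 */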

static void rio_set_led_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 mode;

	if (np->chip_id != CHIP_IP1000A)
		return;

	mode = dr32(ASICCtrl);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if (np->led_mode & 0x01)
		mode |= IPG_AC_LED_MODE;
	if (np->led_mode & 0x02)
		mode |= IPG_AC_LED_MODE_BIT_1;
	if (np->led_mode & 0x08)
		mode |= IPG_AC_LED_SPEED;

	dw32(ASICCtrl, mode);
}

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}

static void free_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	/* Free all the skbuffs in the queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
					 skb->len, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}
}

static void rio_reset_ring(struct netdev_private *np)
{
	int i;

	np->cur_rx = 0;
	np->cur_tx = 0;
	np->old_rx = 0;
	np->old_tx = 0;

	for (i = 0; i < TX_RING_SIZE; i++)
		np->tx_ring[i].status = cpu_to_le64(TFDDone);

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].status = 0;
}

/* allocate and initialize Tx and Rx descriptors */
static int alloc_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	rio_reset_ring(np);
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
					      ((i + 1) % TX_RING_SIZE) *
					      sizeof(struct netdev_desc));
	}

	/* Initialize Rx descriptors & allocate buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (!skb) {
			free_list(dev);
			return -ENOMEM;
		}

		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof(struct netdev_desc));
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64(pci_map_single(
				np->pdev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	return 0;
}
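
/*
 * Descriptor 'fraginfo' packing, as implied by desc_to_dma() and the
 * alloc_list() code above: bits 0-47 hold the DMA address of the
 * fragment and bits 48-63 hold its length, i.e. roughly
 *
 *	fraginfo = cpu_to_le64(dma_addr | (u64)len << 48);
 *	dma_addr = le64_to_cpu(fraginfo) & DMA_BIT_MASK(48);
 */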

static void rio_hw_init(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i;
	u16 macctrl;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2,
	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	rio_set_led_mode(dev);

	/* DebugCtrl bits 4, 5 and 9 must be set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	if (np->chip_id == CHIP_IP1000A &&
	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
		/* PHY magic taken from ipg driver, undocumented registers */
		mii_write(dev, np->phy_addr, 31, 0x0001);
		mii_write(dev, np->phy_addr, 27, 0x01e0);
		mii_write(dev, np->phy_addr, 31, 0x0002);
		mii_write(dev, np->phy_addr, 27, 0xeb8e);
		mii_write(dev, np->phy_addr, 31, 0x0000);
		mii_write(dev, np->phy_addr, 30, 0x005e);
		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
	}

	if (np->phy_media)
		mii_set_media_pcs(dev);
	else
		mii_set_media(dev);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO+14);

	/* Set RFDListPtr */
	dw32(RFDListPtr0, np->rx_ring_dma);
	dw32(RFDListPtr1, 0);

	/* Set station address */
	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
	 */
	for (i = 0; i < 3; i++)
		dw16(StationAddr0 + 2 * i,
		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC's VID, CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);
}

static void rio_hw_stop(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	/* Disable interrupts */
	dw16(IntEnable, 0);

	/* Stop Tx and Rx logic */
	dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
}
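
/*
 * For reference, simple arithmetic on the values programmed in
 * rio_hw_init() above, given the documented 320 ns poll granularity:
 * RxDMAPollPeriod = 0x20 has the chip re-scan the Rx ring roughly every
 * 0x20 * 320 ns = 10.24 us, and TxDMAPollPeriod = 0xff roughly every
 * 0xff * 320 ns = 81.6 us.  Likewise rx_timeout is programmed in 640 ns
 * units, which is why rio_probe1() prints np->rx_timeout * 640 as ns.
 */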

static int rio_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pdev->irq;
	int i;

	i = alloc_list(dev);
	if (i)
		return i;

	rio_hw_init(dev);

	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i) {
		rio_hw_stop(dev);
		free_list(dev);
		return i;
	}

	setup_timer(&np->timer, rio_timer, (unsigned long)dev);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer(&np->timer);

	netif_start_queue (dev);

	dl2k_enable_int(np);
	return 0;
}

static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = netdev_alloc_skb_ip_align(dev,
								np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, dr32(TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	netif_trans_update(dev); /* prevent tx timeout */
}

static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct netdev_desc *txdesc;
	unsigned entry;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag = VLANTagInsert |
		    ((u64)np->vlan << 32) |
		    ((u64)skb->priority << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
	/* Schedule ISR */
	dw32(CountDown, 10000);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (!dr32(TFDListPtr0)) {
		dw32(TFDListPtr0, np->tx_ring_dma +
		     entry * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);
	}

	return NETDEV_TX_OK;
}
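
/*
 * Tx interrupt mitigation in start_xmit() above: TxDMAIndicate (a
 * TxDMAComplete interrupt request) is set on only every
 * np->tx_coalesce-th descriptor, and the CountDown register is armed as
 * a backstop so an IntRequested interrupt still fires and reclaims the
 * ring if traffic stops before the next indicated descriptor completes.
 * rio_interrupt() re-arms CountDown with a short value whenever used
 * descriptors are still outstanding.
 */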

static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	unsigned int_status;
	int cnt = max_intrloop;
	int handled = 0;

	while (1) {
		int_status = dr16(IntStatus);
		dw16(IntStatus, int_status);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = dr32(TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		dw32(CountDown, 100);
	return IRQ_RETVAL(handled);
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	unsigned long flag = 0;

	if (irq)
		spin_lock(&np->tx_lock);
	else
		spin_lock_irqsave(&np->tx_lock, flag);

	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  desc_to_dma(&np->tx_ring[entry]),
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_kfree_skb_irq (skb);
		else
			dev_kfree_skb (skb);

		np->tx_skbuff[entry] = NULL;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock(&np->tx_lock);
	else
		spin_unlock_irqrestore(&np->tx_lock, flag);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */

	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}
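
/*
 * Locking convention in rio_free_tx() above: the 'irq' argument says
 * whether we are already in the interrupt handler.  From rio_interrupt()
 * a plain spin_lock() suffices because the handler runs in hard-irq
 * context, while process-context callers such as rio_tx_timeout() must
 * use spin_lock_irqsave() so Tx reclaim cannot race with the ISR.
 */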

static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int frame_id;
	int i;

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
		/* Transmit Underrun needs TxReset, DMAReset, FIFOReset */
		dw16(ASICCtrl + 2,
		     TxReset | DMAReset | FIFOReset | NetworkReset);
		/* Wait for ResetBusy bit clear */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		dw32(TFDListPtr0, np->tx_ring_dma +
		     np->old_tx * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		dw16(ASICCtrl + 2, TxReset | FIFOReset);
		/* Wait reset done */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
	/* Restart the Tx */
	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}

static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						 np->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			if (skb) {
				skb->protocol = eth_type_trans (skb, dev);
#if 0
				/* Checksum done by hw, but csum value unavailable. */
				if (np->pdev->pci_rev_id >= 0x0c &&
				    !(frame_status & (TCPError | UDPError | IPError))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
#endif
				netif_rx (skb);
			} else {
				/* copy-skb allocation failed; the original
				   buffer stays in the ring, drop the frame */
				np->stats.rx_dropped++;
			}
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}
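
/*
 * Copybreak strategy in receive_packet() above: frames no longer than
 * the 'copy_thresh' module parameter are copied into a freshly
 * allocated skb of exactly pkt_len bytes (after DMA-syncing the buffer)
 * so the full-size Rx buffer can stay mapped and be reused in place;
 * larger frames are unmapped and handed up whole, and the refill loop
 * at the bottom allocates a replacement buffer for that ring slot.
 */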

static void
rio_error (struct net_device *dev, int int_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u16 macctrl;

	/* Link change event */
	if (int_status & LinkEvent) {
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			if (np->speed == 1000)
				np->tx_coalesce = tx_coalesce;
			else
				np->tx_coalesce = 1;
			macctrl = 0;
			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ?
				TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ?
				RxFlowControlEnable : 0;
			dw16(MACCtrl, macctrl);
			np->link_status = 1;
			netif_carrier_on(dev);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
			np->link_status = 0;
			netif_carrier_off(dev);
		}
	}

	/* UpdateStats statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}

	/* PCI Error: a catastrophic error related to the bus interface
	   occurred; set GlobalReset and HostReset to recover. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		dw16(ASICCtrl + 2, GlobalReset | HostReset);
		mdelay (500);
		rio_set_led_mode(dev);
	}
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif
	unsigned int stat_reg;

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */

	np->stats.rx_packets += dr32(FramesRcvOk);
	np->stats.tx_packets += dr32(FramesXmtOk);
	np->stats.rx_bytes += dr32(OctetRcvOk);
	np->stats.tx_bytes += dr32(OctetXmtOk);

	np->stats.multicast = dr32(McstFramesRcvdOk);
	np->stats.collisions += dr32(SingleColFrames)
			     + dr32(MultiColFrames);

	/* detailed tx errors */
	stat_reg = dr16(FramesAbortXSColls);
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	stat_reg = dr16(CarrierSenseErrors);
	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	/* Clear all other statistic registers. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr32(LateCollisions);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);

#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return &np->stats;
}
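
/*
 * The hardware statistics block is read-to-clear: each counter resets
 * when it is read, which is why get_stats() accumulates into np->stats
 * with += and why the long tail of bare dr16()/dr32() calls exists at
 * all: the values are discarded, the reads only acknowledge the
 * counters so they cannot overflow and keep re-raising UpdateStats.
 */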

static int
clear_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */
	dr32(FramesRcvOk);
	dr32(FramesXmtOk);
	dr32(OctetRcvOk);
	dr32(OctetXmtOk);

	dr32(McstFramesRcvdOk);
	dr32(SingleColFrames);
	dr32(MultiColFrames);
	dr32(LateCollisions);
	/* detailed rx errors */
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);

	/* detailed tx errors */
	dr16(FramesAbortXSColls);
	dr16(CarrierSenseErrors);

	/* Clear all other statistic registers. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);
#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return 0;
}


static int
change_mtu (struct net_device *dev, int new_mtu)
{
	struct netdev_private *np = netdev_priv(dev);
	int max = (np->jumbo) ? MAX_JUMBO : 1536;

	if ((new_mtu < 68) || (new_mtu > max)) {
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}

static void
set_multicast (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 hash_table[2];
	u16 rx_mode = 0;

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
	hash_table[1] |= 0x02000000;
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(netdev_mc_count(dev) > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		/* Receive broadcast frames and multicast frames filtering
		   by Hashtable */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		netdev_for_each_mc_addr(ha, dev) {
			int bit, index = 0;
			int crc = ether_crc_le(ETH_ALEN, ha->addr);
			/* The inverted high significant 6 bits of CRC are
			   used as an index to hashtable */
			for (bit = 0; bit < 6; bit++)
				if (crc & (1 << (31 - bit)))
					index |= (1 << bit);
			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	dw32(HashTable0, hash_table[0]);
	dw32(HashTable1, hash_table[1]);
	dw16(ReceiveMode, rx_mode);
}
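
/*
 * Multicast hash sketch: for each address, set_multicast() above takes
 * the little-endian CRC-32 of the 6-byte MAC, bit-reverses the top six
 * CRC bits into a 0..63 index, and sets that bit across the two 32-bit
 * HashTable registers.  E.g. for crc = 0xD0000000 the top six bits are
 * 110100; reversed that is 001011 = 11, so hash_table[0] gets bit 11.
 */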

static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, "dl2k", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	if (np->phy_media) {
		/* fiber device */
		cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
		cmd->advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_INTERNAL;
	} else {
		/* copper device */
		cmd->supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
			SUPPORTED_Autoneg | SUPPORTED_MII;
		cmd->advertising = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
			ADVERTISED_Autoneg | ADVERTISED_MII;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_INTERNAL;
	}
	if (np->link_status) {
		ethtool_cmd_speed_set(cmd, np->speed);
		cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	if (np->an_enable)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->phy_address = np->phy_addr;
	return 0;
}

static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	netif_carrier_off(dev);
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (np->an_enable)
			return 0;
		else {
			np->an_enable = 1;
			mii_set_media(dev);
			return 0;
		}
	} else {
		np->an_enable = 0;
		if (np->speed == 1000) {
			ethtool_cmd_speed_set(cmd, SPEED_100);
			cmd->duplex = DUPLEX_FULL;
			printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
		}
		switch (ethtool_cmd_speed(cmd)) {
		case SPEED_10:
			np->speed = 10;
			np->full_duplex = (cmd->duplex == DUPLEX_FULL);
			break;
		case SPEED_100:
			np->speed = 100;
			np->full_duplex = (cmd->duplex == DUPLEX_FULL);
			break;
		case SPEED_1000: /* not supported */
		default:
			return -EINVAL;
		}
		mii_set_media(dev);
	}
	return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = rio_get_drvinfo,
	.get_settings = rio_get_settings,
	.set_settings = rio_set_settings,
	.get_link = rio_get_link,
};
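
/*
 * These ethtool hooks back the standard userspace tool, so (as an
 * illustration, not part of the original source) forcing a copper port
 * to 100 Mbps full duplex would look like
 *
 *	ethtool -s eth0 autoneg off speed 100 duplex full
 *
 * which lands in rio_set_settings() above.  Asking for 1000 Mbps with
 * autoneg off is rejected there, since the chip requires
 * auto-negotiation at gigabit speed.
 */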

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *miidata = if_mii(rq);

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = phy_addr;
		break;
	case SIOCGMIIREG:
		miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to access the eeprom to avoid failures on some machines */
static int read_eeprom(struct netdev_private *np, int eep_addr)
{
	void __iomem *ioaddr = np->eeprom_addr;
	int i = 1000;

	dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
	while (i-- > 0) {
		if (!(dr16(EepromCtrl) & EEP_BUSY))
			return dr16(EepromData);
	}
	return 0;
}

enum phy_ctrl_bits {
	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
	MII_DUPLEX = 0x08,
};

#define mii_delay() dr8(PhyCtrl)
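
/*
 * The MII management interface below is bit-banged through the PhyCtrl
 * register: MII_WRITE/MII_DATA1 drive the MDIO data line, MII_CLK
 * toggles the MDC clock, and mii_delay() is just a dummy PhyCtrl read
 * used to stretch each clock phase over the bus.  A read frame is 32
 * preamble bits, then ST(01) OP(10) PHYAD(5) REGAD(5), a turnaround
 * bit, and 16 data bits clocked back in, as mii_read() spells out.
 */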

static void
mii_sendbit (struct net_device *dev, u32 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
	dw8(PhyCtrl, data);
	mii_delay ();
	dw8(PhyCtrl, data | MII_CLK);
	mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u8 data;

	data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
	dw8(PhyCtrl, data);
	mii_delay ();
	dw8(PhyCtrl, data | MII_CLK);
	mii_delay ();
	return (dr8(PhyCtrl) >> 1) & 1;
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		mii_sendbit (dev, data & (1 << i));
	}
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		goto err_out;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);
		retval <<= 1;
	}
	/* End cycle */
	mii_getbit (dev);
	return (retval >> 1) & 0xffff;

err_out:
	return 0;
}

static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
	u32 cmd;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
	return 0;
}

static int
mii_wait_link (struct net_device *dev, int wait)
{
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	do {
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		if (bmsr & BMSR_LSTATUS)
			return 0;
		mdelay (1);
	} while (--wait > 0);
	return -1;
}
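
/*
 * mii_get_media() below resolves the negotiated link by ANDing our
 * advertisement with the partner's (plus the 1000BASE-T control/status
 * registers) and picking the highest common ability in the usual
 * priority order: 1000F, 1000H, 100F, 100H, 10F, 10H.  Pause resolution
 * follows the same ANDed bits: symmetric pause keeps the user's tx/rx
 * flow-control settings, asymmetric-only pause disables Tx flow control.
 */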

static int
mii_get_media (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	__u16 mscr;
	__u16 mssr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, MII_BMSR);
	if (np->an_enable) {
		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
			mii_read (dev, phy_addr, MII_LPA);
		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
		mssr = mii_read (dev, phy_addr, MII_STAT1000);
		if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
			np->speed = 1000;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
		} else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
			np->speed = 1000;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
		} else if (negotiate & ADVERTISE_100FULL) {
			np->speed = 100;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
		} else if (negotiate & ADVERTISE_100HALF) {
			np->speed = 100;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
		} else if (negotiate & ADVERTISE_10FULL) {
			np->speed = 10;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
		} else if (negotiate & ADVERTISE_10HALF) {
			np->speed = 10;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
		}
		if (negotiate & ADVERTISE_PAUSE_CAP) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & ADVERTISE_PAUSE_ASYM) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
		switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
		case BMCR_SPEED1000:
			printk (KERN_INFO "Operating at 1000 Mbps, ");
			break;
		case BMCR_SPEED100:
			printk (KERN_INFO "Operating at 100 Mbps, ");
			break;
		case 0:
			printk (KERN_INFO "Operating at 10 Mbps, ");
		}
		if (bmcr & BMCR_FULLDPLX) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media (struct net_device *dev)
{
	__u16 pscr;
	__u16 bmcr;
	__u16 bmsr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
			~(ADVERTISE_100FULL | ADVERTISE_10FULL |
			  ADVERTISE_100HALF | ADVERTISE_10HALF |
			  ADVERTISE_100BASE4);
		if (bmsr & BMSR_100FULL)
			anar |= ADVERTISE_100FULL;
		if (bmsr & BMSR_100HALF)
			anar |= ADVERTISE_100HALF;
		if (bmsr & BMSR_100BASE4)
			anar |= ADVERTISE_100BASE4;
		if (bmsr & BMSR_10FULL)
			anar |= ADVERTISE_10FULL;
		if (bmsr & BMSR_10HALF)
			anar |= ADVERTISE_10HALF;
		anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mii_write (dev, phy_addr, MII_ADVERTISE, anar);

		/* Enable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr |= 3 << 5;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr &= ~(3 << 5);
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* 2) PHY Reset */
		bmcr = mii_read (dev, phy_addr, MII_BMCR);
		bmcr |= BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);

		/* 3) Power Down */
		bmcr = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay (100);	/* wait a certain time */

		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ADVERTISE, 0);

		/* 5) Set media and Power Up */
		bmcr = BMCR_PDOWN;
		if (np->speed == 100) {
			bmcr |= BMCR_SPEED100;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr |= BMCR_FULLDPLX;
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
#if 0
		/* Set 1000BaseT Master/Slave setting */
		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
		mscr |= MII_MSCR_CFG_ENABLE;
		mscr &= ~MII_MSCR_CFG_VALUE;
#endif
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
	}
	return 0;
}

static int
mii_get_media_pcs (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, PCS_BMSR);
	if (np->an_enable) {
		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
			mii_read (dev, phy_addr, PCS_ANLPAR);
		np->speed = 1000;
		if (negotiate & PCS_ANAR_FULL_DUPLEX) {
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
			np->full_duplex = 1;
		} else {
			printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
			np->full_duplex = 0;
		}
		if (negotiate & PCS_ANAR_PAUSE) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & PCS_ANAR_ASYMMETRIC) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
		printk (KERN_INFO "Operating at 1000 Mbps, ");
		if (bmcr & BMCR_FULLDPLX) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}
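
/*
 * On fiber boards (np->phy_media, set from the PhyMedia strap in
 * ASICCtrl) the PCS_* register set used above and below replaces the
 * copper MII registers: there is only one speed, 1000 Mbps, so
 * auto-negotiation only settles duplex and pause.  That is why
 * mii_get_media_pcs() hard-codes np->speed = 1000 and
 * mii_set_media_pcs() advertises only the half/full duplex bits taken
 * from the extended status register.
 */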

static int
mii_set_media_pcs (struct net_device *dev)
{
	__u16 bmcr;
	__u16 esr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Auto-Negotiation? */
	if (np->an_enable) {
		/* Advertise capabilities */
		esr = mii_read (dev, phy_addr, PCS_ESR);
		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
			~PCS_ANAR_HALF_DUPLEX &
			~PCS_ANAR_FULL_DUPLEX;
		if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
			anar |= PCS_ANAR_HALF_DUPLEX;
		if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
			anar |= PCS_ANAR_FULL_DUPLEX;
		anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
		mii_write (dev, phy_addr, MII_ADVERTISE, anar);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* PHY Reset */
		bmcr = BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
		if (np->full_duplex) {
			bmcr = BMCR_FULLDPLX;
			printk (KERN_INFO "Manual full duplex\n");
		} else {
			bmcr = 0;
			printk (KERN_INFO "Manual half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);

		/* Advertise nothing */
		mii_write (dev, phy_addr, MII_ADVERTISE, 0);
	}
	return 0;
}


static int
rio_close (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct pci_dev *pdev = np->pdev;

	netif_stop_queue (dev);

	rio_hw_stop(dev);

	free_irq(pdev->irq, dev);
	del_timer_sync (&np->timer);

	free_list(dev);

	return 0;
}

static void
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		pci_iounmap(pdev, np->ioaddr);
#endif
		pci_iounmap(pdev, np->eeprom_addr);
		free_netdev (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rio_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct netdev_private *np = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	del_timer_sync(&np->timer);
	rio_hw_stop(dev);

	return 0;
}

static int rio_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct netdev_private *np = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rio_reset_ring(np);
	rio_hw_init(dev);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer(&np->timer);
	netif_device_attach(dev);
	dl2k_enable_int(np);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS	(&rio_pm_ops)

#else

#define RIO_PM_OPS	NULL

#endif /* CONFIG_PM_SLEEP */

static struct pci_driver rio_driver = {
	.name		= "dl2k",
	.id_table	= rio_pci_tbl,
	.probe		= rio_probe1,
	.remove		= rio_remove1,
	.driver.pm	= RIO_PM_OPS,
};

module_pci_driver(rio_driver);
/*

Compile command:

gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

Read Documentation/networking/dl2k.txt for details.

*/