/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng.<edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/

#define DRV_NAME	"DL2000/TC902x-based linux driver"
#define DRV_VERSION	"v1.19"
#define DRV_RELDATE	"2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>

#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

static char version[] =
      KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow = -1;
static int rx_flow = -1;
static int copy_thresh;
static int rx_coalesce = 10;	/* Rx frame count each interrupt */
static int rx_timeout = 200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce = 16;	/* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0);	/* HW xmit count each TxDMAComplete */


/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete | \
       UpdateStats | LinkEvent)

static void dl2k_enable_int(struct netdev_private *np)
{
	void __iomem *ioaddr = np->ioaddr;

	dw16(IntEnable, DEFAULT_INTR);
}

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (struct netdev_private *, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
		      u16 data);

static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
	.ndo_open		= rio_open,
	.ndo_start_xmit		= start_xmit,
	.ndo_stop		= rio_close,
	.ndo_get_stats		= get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= set_multicast,
	.ndo_do_ioctl		= rio_ioctl,
	.ndo_tx_timeout		= rio_tx_timeout,
	.ndo_change_mtu		= change_mtu,
};

static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	np->eeprom_addr = ioaddr;

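	/* BAR0 (I/O space) is always mapped and kept in np->eeprom_addr for
	 * EEPROM access; when MEM_MAPPING is defined the chip registers are
	 * accessed through the memory-mapped BAR1 instead (np->ioaddr).
	 */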
#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
				   strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				   strcmp (media[card_idx], "6") == 0) {
				np->speed = 1000;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				   strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	if (np->chip_id == CHIP_IP1000A &&
	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
		/* PHY magic taken from ipg driver, undocumented registers */
		mii_write(dev, np->phy_addr, 31, 0x0001);
		mii_write(dev, np->phy_addr, 27, 0x01e0);
		mii_write(dev, np->phy_addr, 31, 0x0002);
		mii_write(dev, np->phy_addr, 27, 0xeb8e);
		mii_write(dev, np->phy_addr, 31, 0x0000);
		mii_write(dev, np->phy_addr, 30, 0x005e);
		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
	}

	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
		mii_set_media_pcs (dev);
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
		mii_set_media (dev);
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
		       tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
		       np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}

static int
find_miiphy (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i, phy_found = 0;

	np->phy_addr = 1;

	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read (dev, i, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phy_addr = i;
			phy_found++;
		}
	}
	if (!phy_found) {
		printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
		return -ENODEV;
	}
	return 0;
}

static int
parse_eeprom (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i, j;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;

	int cid, next;

	for (i = 0; i < 128; i++)
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));

	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != cpu_to_le32(crc)) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
				dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	if (np->chip_id == CHIP_IP1000A) {
		np->led_mode = psrom->led_mode;
		return 0;
	}

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}

	/* Parse Software Information Block */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}

static void rio_set_led_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 mode;

	if (np->chip_id != CHIP_IP1000A)
		return;

	mode = dr32(ASICCtrl);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if (np->led_mode & 0x01)
		mode |= IPG_AC_LED_MODE;
	if (np->led_mode & 0x02)
		mode |= IPG_AC_LED_MODE_BIT_1;
	if (np->led_mode & 0x08)
		mode |= IPG_AC_LED_SPEED;

	dw32(ASICCtrl, mode);
}

static int
rio_open (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	const int irq = np->pdev->irq;
	int i;
	u16 macctrl;

	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2,
	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	rio_set_led_mode(dev);

	/* DebugCtrl bits 4, 5, 9 must be set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO+14);

	alloc_list (dev);

	/* Set station address */
	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
	 */
	for (i = 0; i < 3; i++)
		dw16(StationAddr0 + 2 * i,
		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information is tagged by the TFC's VID and CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	setup_timer(&np->timer, rio_timer, (unsigned long)dev);
	np->timer.expires = jiffies + 1*HZ;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);

	netif_start_queue (dev);

	dl2k_enable_int(np);
	return 0;
}

static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = netdev_alloc_skb_ip_align(dev,
								np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, dr32(TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	dev->trans_start = jiffies; /* prevent tx timeout */
}

/* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocate a fixed size skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;

		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 (pci_map_single (
			 np->pdev, skb->data, np->rx_buf_sz,
			 PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Set RFDListPtr */
	dw32(RFDListPtr0, np->rx_ring_dma);
	dw32(RFDListPtr1, 0);
}

static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct netdev_desc *txdesc;
	unsigned entry;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag = VLANTagInsert |
		    ((u64)np->vlan << 32) |
		    ((u64)skb->priority << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
	/* Schedule ISR */
	dw32(CountDown, 10000);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (!dr32(TFDListPtr0)) {
		dw32(TFDListPtr0, np->tx_ring_dma +
		     entry * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);
	}

	return NETDEV_TX_OK;
}

static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	unsigned int_status;
	int cnt = max_intrloop;
	int handled = 0;

	while (1) {
		int_status = dr16(IntStatus);
		dw16(IntStatus, int_status);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = dr32(TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		dw32(CountDown, 100);
	return IRQ_RETVAL(handled);
}

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}

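/* Reclaim descriptors whose TFDDone bit is set: unmap the Tx buffer, free
 * the skb and advance old_tx.  The irq argument selects the plain spinlock
 * (interrupt context) or the irqsave variant (process context).
 */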
static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	unsigned long flag = 0;

	if (irq)
		spin_lock(&np->tx_lock);
	else
		spin_lock_irqsave(&np->tx_lock, flag);

	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  desc_to_dma(&np->tx_ring[entry]),
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_kfree_skb_irq (skb);
		else
			dev_kfree_skb (skb);

		np->tx_skbuff[entry] = NULL;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock(&np->tx_lock);
	else
		spin_unlock_irqrestore(&np->tx_lock, flag);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */

	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}

static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int frame_id;
	int i;

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
		/* Transmit Underrun need to set TxReset, DMAReset, FIFOReset */
		dw16(ASICCtrl + 2,
		     TxReset | DMAReset | FIFOReset | NetworkReset);
		/* Wait for ResetBusy bit clear */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		dw32(TFDListPtr0, np->tx_ring_dma +
		     np->old_tx * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		dw16(ASICCtrl + 2, TxReset | FIFOReset);
		/* Wait reset done */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
	/* Restart the Tx */
	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}

static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						 np->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Checksum done by hw, but csum value unavailable. */
			if (np->pdev->pci_rev_id >= 0x0c &&
			    !(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}

static void
rio_error (struct net_device *dev, int int_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u16 macctrl;

	/* Link change event */
	if (int_status & LinkEvent) {
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			if (np->speed == 1000)
				np->tx_coalesce = tx_coalesce;
			else
				np->tx_coalesce = 1;
			macctrl = 0;
			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ?
				TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ?
				RxFlowControlEnable : 0;
			dw16(MACCtrl, macctrl);
			np->link_status = 1;
			netif_carrier_on(dev);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
			np->link_status = 0;
			netif_carrier_off(dev);
		}
	}

	/* UpdateStats statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}

	/* PCI Error, a catastrophic error related to the bus interface
	   occurs, set GlobalReset and HostReset to reset. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		dw16(ASICCtrl + 2, GlobalReset | HostReset);
		mdelay (500);
		rio_set_led_mode(dev);
	}
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif
	unsigned int stat_reg;

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */

	np->stats.rx_packets += dr32(FramesRcvOk);
	np->stats.tx_packets += dr32(FramesXmtOk);
	np->stats.rx_bytes += dr32(OctetRcvOk);
	np->stats.tx_bytes += dr32(OctetXmtOk);

	np->stats.multicast = dr32(McstFramesRcvdOk);
	np->stats.collisions += dr32(SingleColFrames)
			     +  dr32(MultiColFrames);

	/* detailed tx errors */
	stat_reg = dr16(FramesAbortXSColls);
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	stat_reg = dr16(CarrierSenseErrors);
	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	/* Clear all other statistic registers. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr32(LateCollisions);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);

#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return &np->stats;
}

static int
clear_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */
	dr32(FramesRcvOk);
	dr32(FramesXmtOk);
	dr32(OctetRcvOk);
	dr32(OctetXmtOk);

	dr32(McstFramesRcvdOk);
	dr32(SingleColFrames);
	dr32(MultiColFrames);
	dr32(LateCollisions);
	/* detailed rx errors */
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);

	/* detailed tx errors */
	dr16(FramesAbortXSColls);
	dr16(CarrierSenseErrors);

	/* Clear all other statistic registers. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);
#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return 0;
}


static int
change_mtu (struct net_device *dev, int new_mtu)
{
	struct netdev_private *np = netdev_priv(dev);
	int max = (np->jumbo) ? MAX_JUMBO : 1536;

	if ((new_mtu < 68) || (new_mtu > max)) {
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}

static void
set_multicast (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 hash_table[2];
	u16 rx_mode = 0;

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
	hash_table[1] |= 0x02000000;
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(netdev_mc_count(dev) > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		/* Receive broadcast frames and multicast frames filtering
		   by Hashtable */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		netdev_for_each_mc_addr(ha, dev) {
			int bit, index = 0;
			int crc = ether_crc_le(ETH_ALEN, ha->addr);
			/* The inverted high significant 6 bits of CRC are
			   used as an index to hashtable */
			for (bit = 0; bit < 6; bit++)
				if (crc & (1 << (31 - bit)))
					index |= (1 << bit);
			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	dw32(HashTable0, hash_table[0]);
	dw32(HashTable1, hash_table[1]);
	dw16(ReceiveMode, rx_mode);
}

static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, "dl2k", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	if (np->phy_media) {
		/* fiber device */
		cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
		cmd->advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_INTERNAL;
	} else {
		/* copper device */
		cmd->supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
			SUPPORTED_Autoneg | SUPPORTED_MII;
		cmd->advertising = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
			ADVERTISED_Autoneg | ADVERTISED_MII;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_INTERNAL;
	}
	if (np->link_status) {
		ethtool_cmd_speed_set(cmd, np->speed);
		cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	if (np->an_enable)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->phy_address = np->phy_addr;
	return 0;
}

static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	netif_carrier_off(dev);
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (np->an_enable)
			return 0;
		else {
			np->an_enable = 1;
			mii_set_media(dev);
			return 0;
		}
	} else {
		np->an_enable = 0;
		if (np->speed == 1000) {
			ethtool_cmd_speed_set(cmd, SPEED_100);
			cmd->duplex = DUPLEX_FULL;
			printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
		}
		switch (ethtool_cmd_speed(cmd)) {
		case SPEED_10:
			np->speed = 10;
			np->full_duplex = (cmd->duplex == DUPLEX_FULL);
			break;
		case SPEED_100:
			np->speed = 100;
			np->full_duplex = (cmd->duplex == DUPLEX_FULL);
			break;
		case SPEED_1000: /* not supported */
		default:
			return -EINVAL;
		}
		mii_set_media(dev);
	}
	return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = rio_get_drvinfo,
	.get_settings = rio_get_settings,
	.set_settings = rio_set_settings,
	.get_link = rio_get_link,
};

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *miidata = if_mii(rq);

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = phy_addr;
		break;
	case SIOCGMIIREG:
		miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to read/write the eeprom to avoid failures on
   some machines */
static int read_eeprom(struct netdev_private *np, int eep_addr)
{
	void __iomem *ioaddr = np->eeprom_addr;
	int i = 1000;

	dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
	while (i-- > 0) {
		if (!(dr16(EepromCtrl) & EEP_BUSY))
			return dr16(EepromData);
	}
	return 0;
}

enum phy_ctrl_bits {
	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
	MII_DUPLEX = 0x08,
};

#define mii_delay() dr8(PhyCtrl)

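/* The MII management interface is bit-banged through the PhyCtrl register:
 * MII_DATA1 drives MDIO, MII_CLK toggles MDC and MII_WRITE enables the
 * output driver; mii_delay() is a dummy register read used for timing.
 */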
static void
mii_sendbit (struct net_device *dev, u32 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
	dw8(PhyCtrl, data);
	mii_delay ();
	dw8(PhyCtrl, data | MII_CLK);
	mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u8 data;

	data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
	dw8(PhyCtrl, data);
	mii_delay ();
	dw8(PhyCtrl, data | MII_CLK);
	mii_delay ();
	return (dr8(PhyCtrl) >> 1) & 1;
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		mii_sendbit (dev, data & (1 << i));
	}
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		goto err_out;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);
		retval <<= 1;
	}
	/* End cycle */
	mii_getbit (dev);
	return (retval >> 1) & 0xffff;

err_out:
	return 0;
}

static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
	u32 cmd;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
	return 0;
}

static int
mii_wait_link (struct net_device *dev, int wait)
{
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	do {
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		if (bmsr & BMSR_LSTATUS)
			return 0;
		mdelay (1);
	} while (--wait > 0);
	return -1;
}

static int
mii_get_media (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	__u16 mscr;
	__u16 mssr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, MII_BMSR);
	if (np->an_enable) {
		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
			mii_read (dev, phy_addr, MII_LPA);
		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
		mssr = mii_read (dev, phy_addr, MII_STAT1000);
		if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
			np->speed = 1000;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
		} else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
			np->speed = 1000;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
		} else if (negotiate & ADVERTISE_100FULL) {
			np->speed = 100;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
		} else if (negotiate & ADVERTISE_100HALF) {
			np->speed = 100;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
		} else if (negotiate & ADVERTISE_10FULL) {
			np->speed = 10;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
		} else if (negotiate & ADVERTISE_10HALF) {
			np->speed = 10;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
		}
		if (negotiate & ADVERTISE_PAUSE_CAP) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & ADVERTISE_PAUSE_ASYM) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
		switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
		case BMCR_SPEED1000:
			printk (KERN_INFO "Operating at 1000 Mbps, ");
			break;
		case BMCR_SPEED100:
			printk (KERN_INFO "Operating at 100 Mbps, ");
			break;
		case 0:
			printk (KERN_INFO "Operating at 10 Mbps, ");
		}
		if (bmcr & BMCR_FULLDPLX) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media (struct net_device *dev)
{
	__u16 pscr;
	__u16 bmcr;
	__u16 bmsr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
			~(ADVERTISE_100FULL | ADVERTISE_10FULL |
			  ADVERTISE_100HALF | ADVERTISE_10HALF |
			  ADVERTISE_100BASE4);
		if (bmsr & BMSR_100FULL)
			anar |= ADVERTISE_100FULL;
		if (bmsr & BMSR_100HALF)
			anar |= ADVERTISE_100HALF;
		if (bmsr & BMSR_100BASE4)
			anar |= ADVERTISE_100BASE4;
		if (bmsr & BMSR_10FULL)
			anar |= ADVERTISE_10FULL;
		if (bmsr & BMSR_10HALF)
			anar |= ADVERTISE_10HALF;
		anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mii_write (dev, phy_addr, MII_ADVERTISE, anar);

		/* Enable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr |= 3 << 5;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr &= ~(3 << 5);
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* 2) PHY Reset */
		bmcr = mii_read (dev, phy_addr, MII_BMCR);
		bmcr |= BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);

		/* 3) Power Down */
		bmcr = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay (100);	/* wait a certain time */

		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ADVERTISE, 0);

		/* 5) Set media and Power Up */
		bmcr = BMCR_PDOWN;
		if (np->speed == 100) {
			bmcr |= BMCR_SPEED100;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr |= BMCR_FULLDPLX;
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
#if 0
		/* Set 1000BaseT Master/Slave setting */
		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
		mscr |= MII_MSCR_CFG_ENABLE;
		mscr &= ~MII_MSCR_CFG_VALUE = 0;
#endif
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
	}
	return 0;
}

static int
mii_get_media_pcs (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, PCS_BMSR);
	if (np->an_enable) {
		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
			mii_read (dev, phy_addr, PCS_ANLPAR);
		np->speed = 1000;
		if (negotiate & PCS_ANAR_FULL_DUPLEX) {
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
			np->full_duplex = 1;
		} else {
			printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
			np->full_duplex = 0;
		}
		if (negotiate & PCS_ANAR_PAUSE) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & PCS_ANAR_ASYMMETRIC) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
		printk (KERN_INFO "Operating at 1000 Mbps, ");
		if (bmcr & BMCR_FULLDPLX) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media_pcs (struct net_device *dev)
{
	__u16 bmcr;
	__u16 esr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Auto-Negotiation? */
	if (np->an_enable) {
		/* Advertise capabilities */
		esr = mii_read (dev, phy_addr, PCS_ESR);
		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
			~PCS_ANAR_HALF_DUPLEX &
			~PCS_ANAR_FULL_DUPLEX;
		if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
			anar |= PCS_ANAR_HALF_DUPLEX;
		if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
			anar |= PCS_ANAR_FULL_DUPLEX;
		anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
		mii_write (dev, phy_addr, MII_ADVERTISE, anar);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* PHY Reset */
		bmcr = BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
		if (np->full_duplex) {
			bmcr = BMCR_FULLDPLX;
			printk (KERN_INFO "Manual full duplex\n");
		} else {
			bmcr = 0;
			printk (KERN_INFO "Manual half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);

		/* Advertise nothing */
		mii_write (dev, phy_addr, MII_ADVERTISE, 0);
	}
	return 0;
}


static int
rio_close (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	struct pci_dev *pdev = np->pdev;
	struct sk_buff *skb;
	int i;

	netif_stop_queue (dev);

	/* Disable interrupts */
	dw16(IntEnable, 0);

	/* Stop Tx and Rx logic */
	dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);

	free_irq(pdev->irq, dev);
	del_timer_sync (&np->timer);

	/* Free all the skbuffs in the queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
					 skb->len, PCI_DMA_FROMDEVICE);
			dev_kfree_skb (skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		pci_iounmap(pdev, np->ioaddr);
#endif
		pci_iounmap(pdev, np->eeprom_addr);
		free_netdev (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
}

static struct pci_driver rio_driver = {
	.name		= "dl2k",
	.id_table	= rio_pci_tbl,
	.probe		= rio_probe1,
	.remove		= rio_remove1,
};

module_pci_driver(rio_driver);

/*

Compile command:

gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

Read Documentation/networking/dl2k.txt for details.

*/
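
/*
   Example module load (illustrative parameter values only; see the
   module_param list at the top of this file and
   Documentation/networking/dl2k.txt for the full set):

   insmod dl2k.ko media=auto mtu=1500 rx_coalesce=10 rx_timeout=200
*/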