1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */ 3 /* 4 Copyright (c) 2001, 2002 by D-Link Corporation 5 Written by Edward Peng.<edward_peng@dlink.com.tw> 6 Created 03-May-2001, base on Linux' sundance.c. 7 8 */ 9 10 #include "dl2k.h" 11 #include <linux/dma-mapping.h> 12 13 #define dw32(reg, val) iowrite32(val, ioaddr + (reg)) 14 #define dw16(reg, val) iowrite16(val, ioaddr + (reg)) 15 #define dw8(reg, val) iowrite8(val, ioaddr + (reg)) 16 #define dr32(reg) ioread32(ioaddr + (reg)) 17 #define dr16(reg) ioread16(ioaddr + (reg)) 18 #define dr8(reg) ioread8(ioaddr + (reg)) 19 20 #define MAX_UNITS 8 21 static int mtu[MAX_UNITS]; 22 static int vlan[MAX_UNITS]; 23 static int jumbo[MAX_UNITS]; 24 static char *media[MAX_UNITS]; 25 static int tx_flow=-1; 26 static int rx_flow=-1; 27 static int copy_thresh; 28 static int rx_coalesce=10; /* Rx frame count each interrupt */ 29 static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */ 30 static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */ 31 32 33 MODULE_AUTHOR ("Edward Peng"); 34 MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter"); 35 MODULE_LICENSE("GPL"); 36 module_param_array(mtu, int, NULL, 0); 37 module_param_array(media, charp, NULL, 0); 38 module_param_array(vlan, int, NULL, 0); 39 module_param_array(jumbo, int, NULL, 0); 40 module_param(tx_flow, int, 0); 41 module_param(rx_flow, int, 0); 42 module_param(copy_thresh, int, 0); 43 module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */ 44 module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */ 45 module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ 46 47 48 /* Enable the default interrupts */ 49 #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \ 50 UpdateStats | LinkEvent) 51 52 static void dl2k_enable_int(struct netdev_private *np) 53 { 54 void __iomem *ioaddr = np->ioaddr; 55 56 dw16(IntEnable, DEFAULT_INTR); 57 } 58 59 static const int max_intrloop = 50; 60 static const int multicast_filter_limit = 0x40; 61 62 static int rio_open (struct net_device *dev); 63 static void rio_timer (struct timer_list *t); 64 static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue); 65 static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); 66 static irqreturn_t rio_interrupt (int irq, void *dev_instance); 67 static void rio_free_tx (struct net_device *dev, int irq); 68 static void tx_error (struct net_device *dev, int tx_status); 69 static int receive_packet (struct net_device *dev); 70 static void rio_error (struct net_device *dev, int int_status); 71 static void set_multicast (struct net_device *dev); 72 static struct net_device_stats *get_stats (struct net_device *dev); 73 static int clear_stats (struct net_device *dev); 74 static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); 75 static int rio_close (struct net_device *dev); 76 static int find_miiphy (struct net_device *dev); 77 static int parse_eeprom (struct net_device *dev); 78 static int read_eeprom (struct netdev_private *, int eep_addr); 79 static int mii_wait_link (struct net_device *dev, int wait); 80 static int mii_set_media (struct net_device *dev); 81 static int mii_get_media (struct net_device *dev); 82 static int mii_set_media_pcs (struct net_device *dev); 83 static int mii_get_media_pcs (struct net_device *dev); 84 static int mii_read (struct net_device *dev, int phy_addr, int reg_num); 85 static int mii_write (struct net_device *dev, int phy_addr, int reg_num, 86 u16 data); 87 88 static const struct ethtool_ops ethtool_ops; 89 90 static const struct net_device_ops netdev_ops = { 91 .ndo_open = rio_open, 92 .ndo_start_xmit = start_xmit, 93 .ndo_stop = rio_close, 94 .ndo_get_stats = get_stats, 95 .ndo_validate_addr = eth_validate_addr, 96 .ndo_set_mac_address = eth_mac_addr, 97 .ndo_set_rx_mode = set_multicast, 98 .ndo_eth_ioctl = rio_ioctl, 99 .ndo_tx_timeout = rio_tx_timeout, 100 }; 101 102 static int 103 rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) 104 { 105 struct net_device *dev; 106 struct netdev_private *np; 107 static int card_idx; 108 int chip_idx = ent->driver_data; 109 int err, irq; 110 void __iomem *ioaddr; 111 void *ring_space; 112 dma_addr_t ring_dma; 113 114 err = pci_enable_device (pdev); 115 if (err) 116 return err; 117 118 irq = pdev->irq; 119 err = pci_request_regions (pdev, "dl2k"); 120 if (err) 121 goto err_out_disable; 122 123 pci_set_master (pdev); 124 125 err = -ENOMEM; 126 127 dev = alloc_etherdev (sizeof (*np)); 128 if (!dev) 129 goto err_out_res; 130 SET_NETDEV_DEV(dev, &pdev->dev); 131 132 np = netdev_priv(dev); 133 134 /* IO registers range. */ 135 ioaddr = pci_iomap(pdev, 0, 0); 136 if (!ioaddr) 137 goto err_out_dev; 138 np->eeprom_addr = ioaddr; 139 140 #ifdef MEM_MAPPING 141 /* MM registers range. */ 142 ioaddr = pci_iomap(pdev, 1, 0); 143 if (!ioaddr) 144 goto err_out_iounmap; 145 #endif 146 np->ioaddr = ioaddr; 147 np->chip_id = chip_idx; 148 np->pdev = pdev; 149 spin_lock_init (&np->tx_lock); 150 spin_lock_init (&np->rx_lock); 151 152 /* Parse manual configuration */ 153 np->an_enable = 1; 154 np->tx_coalesce = 1; 155 if (card_idx < MAX_UNITS) { 156 if (media[card_idx] != NULL) { 157 np->an_enable = 0; 158 if (strcmp (media[card_idx], "auto") == 0 || 159 strcmp (media[card_idx], "autosense") == 0 || 160 strcmp (media[card_idx], "0") == 0 ) { 161 np->an_enable = 2; 162 } else if (strcmp (media[card_idx], "100mbps_fd") == 0 || 163 strcmp (media[card_idx], "4") == 0) { 164 np->speed = 100; 165 np->full_duplex = 1; 166 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || 167 strcmp (media[card_idx], "3") == 0) { 168 np->speed = 100; 169 np->full_duplex = 0; 170 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || 171 strcmp (media[card_idx], "2") == 0) { 172 np->speed = 10; 173 np->full_duplex = 1; 174 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || 175 strcmp (media[card_idx], "1") == 0) { 176 np->speed = 10; 177 np->full_duplex = 0; 178 } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 || 179 strcmp (media[card_idx], "6") == 0) { 180 np->speed=1000; 181 np->full_duplex=1; 182 } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 || 183 strcmp (media[card_idx], "5") == 0) { 184 np->speed = 1000; 185 np->full_duplex = 0; 186 } else { 187 np->an_enable = 1; 188 } 189 } 190 if (jumbo[card_idx] != 0) { 191 np->jumbo = 1; 192 dev->mtu = MAX_JUMBO; 193 } else { 194 np->jumbo = 0; 195 if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE) 196 dev->mtu = mtu[card_idx]; 197 } 198 np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ? 199 vlan[card_idx] : 0; 200 if (rx_coalesce > 0 && rx_timeout > 0) { 201 np->rx_coalesce = rx_coalesce; 202 np->rx_timeout = rx_timeout; 203 np->coalesce = 1; 204 } 205 np->tx_flow = (tx_flow == 0) ? 0 : 1; 206 np->rx_flow = (rx_flow == 0) ? 0 : 1; 207 208 if (tx_coalesce < 1) 209 tx_coalesce = 1; 210 else if (tx_coalesce > TX_RING_SIZE-1) 211 tx_coalesce = TX_RING_SIZE - 1; 212 } 213 dev->netdev_ops = &netdev_ops; 214 dev->watchdog_timeo = TX_TIMEOUT; 215 dev->ethtool_ops = ðtool_ops; 216 #if 0 217 dev->features = NETIF_F_IP_CSUM; 218 #endif 219 /* MTU range: 68 - 1536 or 8000 */ 220 dev->min_mtu = ETH_MIN_MTU; 221 dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE; 222 223 pci_set_drvdata (pdev, dev); 224 225 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma, 226 GFP_KERNEL); 227 if (!ring_space) 228 goto err_out_iounmap; 229 np->tx_ring = ring_space; 230 np->tx_ring_dma = ring_dma; 231 232 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma, 233 GFP_KERNEL); 234 if (!ring_space) 235 goto err_out_unmap_tx; 236 np->rx_ring = ring_space; 237 np->rx_ring_dma = ring_dma; 238 239 /* Parse eeprom data */ 240 parse_eeprom (dev); 241 242 /* Find PHY address */ 243 err = find_miiphy (dev); 244 if (err) 245 goto err_out_unmap_rx; 246 247 /* Fiber device? */ 248 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; 249 np->link_status = 0; 250 /* Set media and reset PHY */ 251 if (np->phy_media) { 252 /* default Auto-Negotiation for fiber deivices */ 253 if (np->an_enable == 2) { 254 np->an_enable = 1; 255 } 256 } else { 257 /* Auto-Negotiation is mandatory for 1000BASE-T, 258 IEEE 802.3ab Annex 28D page 14 */ 259 if (np->speed == 1000) 260 np->an_enable = 1; 261 } 262 263 err = register_netdev (dev); 264 if (err) 265 goto err_out_unmap_rx; 266 267 card_idx++; 268 269 printk (KERN_INFO "%s: %s, %pM, IRQ %d\n", 270 dev->name, np->name, dev->dev_addr, irq); 271 if (tx_coalesce > 1) 272 printk(KERN_INFO "tx_coalesce:\t%d packets\n", 273 tx_coalesce); 274 if (np->coalesce) 275 printk(KERN_INFO 276 "rx_coalesce:\t%d packets\n" 277 "rx_timeout: \t%d ns\n", 278 np->rx_coalesce, np->rx_timeout*640); 279 if (np->vlan) 280 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); 281 return 0; 282 283 err_out_unmap_rx: 284 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, 285 np->rx_ring_dma); 286 err_out_unmap_tx: 287 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, 288 np->tx_ring_dma); 289 err_out_iounmap: 290 #ifdef MEM_MAPPING 291 pci_iounmap(pdev, np->ioaddr); 292 #endif 293 pci_iounmap(pdev, np->eeprom_addr); 294 err_out_dev: 295 free_netdev (dev); 296 err_out_res: 297 pci_release_regions (pdev); 298 err_out_disable: 299 pci_disable_device (pdev); 300 return err; 301 } 302 303 static int 304 find_miiphy (struct net_device *dev) 305 { 306 struct netdev_private *np = netdev_priv(dev); 307 int i, phy_found = 0; 308 309 np->phy_addr = 1; 310 311 for (i = 31; i >= 0; i--) { 312 int mii_status = mii_read (dev, i, 1); 313 if (mii_status != 0xffff && mii_status != 0x0000) { 314 np->phy_addr = i; 315 phy_found++; 316 } 317 } 318 if (!phy_found) { 319 printk (KERN_ERR "%s: No MII PHY found!\n", dev->name); 320 return -ENODEV; 321 } 322 return 0; 323 } 324 325 static int 326 parse_eeprom (struct net_device *dev) 327 { 328 struct netdev_private *np = netdev_priv(dev); 329 void __iomem *ioaddr = np->ioaddr; 330 int i, j; 331 u8 sromdata[256]; 332 u8 *psib; 333 u32 crc; 334 PSROM_t psrom = (PSROM_t) sromdata; 335 336 int cid, next; 337 338 for (i = 0; i < 128; i++) 339 ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i)); 340 341 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ 342 /* Check CRC */ 343 crc = ~ether_crc_le (256 - 4, sromdata); 344 if (psrom->crc != cpu_to_le32(crc)) { 345 printk (KERN_ERR "%s: EEPROM data CRC error.\n", 346 dev->name); 347 return -1; 348 } 349 } 350 351 /* Set MAC address */ 352 for (i = 0; i < 6; i++) 353 dev->dev_addr[i] = psrom->mac_addr[i]; 354 355 if (np->chip_id == CHIP_IP1000A) { 356 np->led_mode = psrom->led_mode; 357 return 0; 358 } 359 360 if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { 361 return 0; 362 } 363 364 /* Parse Software Information Block */ 365 i = 0x30; 366 psib = (u8 *) sromdata; 367 do { 368 cid = psib[i++]; 369 next = psib[i++]; 370 if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) { 371 printk (KERN_ERR "Cell data error\n"); 372 return -1; 373 } 374 switch (cid) { 375 case 0: /* Format version */ 376 break; 377 case 1: /* End of cell */ 378 return 0; 379 case 2: /* Duplex Polarity */ 380 np->duplex_polarity = psib[i]; 381 dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]); 382 break; 383 case 3: /* Wake Polarity */ 384 np->wake_polarity = psib[i]; 385 break; 386 case 9: /* Adapter description */ 387 j = (next - i > 255) ? 255 : next - i; 388 memcpy (np->name, &(psib[i]), j); 389 break; 390 case 4: 391 case 5: 392 case 6: 393 case 7: 394 case 8: /* Reversed */ 395 break; 396 default: /* Unknown cell */ 397 return -1; 398 } 399 i = next; 400 } while (1); 401 402 return 0; 403 } 404 405 static void rio_set_led_mode(struct net_device *dev) 406 { 407 struct netdev_private *np = netdev_priv(dev); 408 void __iomem *ioaddr = np->ioaddr; 409 u32 mode; 410 411 if (np->chip_id != CHIP_IP1000A) 412 return; 413 414 mode = dr32(ASICCtrl); 415 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); 416 417 if (np->led_mode & 0x01) 418 mode |= IPG_AC_LED_MODE; 419 if (np->led_mode & 0x02) 420 mode |= IPG_AC_LED_MODE_BIT_1; 421 if (np->led_mode & 0x08) 422 mode |= IPG_AC_LED_SPEED; 423 424 dw32(ASICCtrl, mode); 425 } 426 427 static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) 428 { 429 return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); 430 } 431 432 static void free_list(struct net_device *dev) 433 { 434 struct netdev_private *np = netdev_priv(dev); 435 struct sk_buff *skb; 436 int i; 437 438 /* Free all the skbuffs in the queue. */ 439 for (i = 0; i < RX_RING_SIZE; i++) { 440 skb = np->rx_skbuff[i]; 441 if (skb) { 442 dma_unmap_single(&np->pdev->dev, 443 desc_to_dma(&np->rx_ring[i]), 444 skb->len, DMA_FROM_DEVICE); 445 dev_kfree_skb(skb); 446 np->rx_skbuff[i] = NULL; 447 } 448 np->rx_ring[i].status = 0; 449 np->rx_ring[i].fraginfo = 0; 450 } 451 for (i = 0; i < TX_RING_SIZE; i++) { 452 skb = np->tx_skbuff[i]; 453 if (skb) { 454 dma_unmap_single(&np->pdev->dev, 455 desc_to_dma(&np->tx_ring[i]), 456 skb->len, DMA_TO_DEVICE); 457 dev_kfree_skb(skb); 458 np->tx_skbuff[i] = NULL; 459 } 460 } 461 } 462 463 static void rio_reset_ring(struct netdev_private *np) 464 { 465 int i; 466 467 np->cur_rx = 0; 468 np->cur_tx = 0; 469 np->old_rx = 0; 470 np->old_tx = 0; 471 472 for (i = 0; i < TX_RING_SIZE; i++) 473 np->tx_ring[i].status = cpu_to_le64(TFDDone); 474 475 for (i = 0; i < RX_RING_SIZE; i++) 476 np->rx_ring[i].status = 0; 477 } 478 479 /* allocate and initialize Tx and Rx descriptors */ 480 static int alloc_list(struct net_device *dev) 481 { 482 struct netdev_private *np = netdev_priv(dev); 483 int i; 484 485 rio_reset_ring(np); 486 np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); 487 488 /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */ 489 for (i = 0; i < TX_RING_SIZE; i++) { 490 np->tx_skbuff[i] = NULL; 491 np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma + 492 ((i + 1) % TX_RING_SIZE) * 493 sizeof(struct netdev_desc)); 494 } 495 496 /* Initialize Rx descriptors & allocate buffers */ 497 for (i = 0; i < RX_RING_SIZE; i++) { 498 /* Allocated fixed size of skbuff */ 499 struct sk_buff *skb; 500 501 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); 502 np->rx_skbuff[i] = skb; 503 if (!skb) { 504 free_list(dev); 505 return -ENOMEM; 506 } 507 508 np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma + 509 ((i + 1) % RX_RING_SIZE) * 510 sizeof(struct netdev_desc)); 511 /* Rubicon now supports 40 bits of addressing space. */ 512 np->rx_ring[i].fraginfo = 513 cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data, 514 np->rx_buf_sz, DMA_FROM_DEVICE)); 515 np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); 516 } 517 518 return 0; 519 } 520 521 static void rio_hw_init(struct net_device *dev) 522 { 523 struct netdev_private *np = netdev_priv(dev); 524 void __iomem *ioaddr = np->ioaddr; 525 int i; 526 u16 macctrl; 527 528 /* Reset all logic functions */ 529 dw16(ASICCtrl + 2, 530 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); 531 mdelay(10); 532 533 rio_set_led_mode(dev); 534 535 /* DebugCtrl bit 4, 5, 9 must set */ 536 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); 537 538 if (np->chip_id == CHIP_IP1000A && 539 (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) { 540 /* PHY magic taken from ipg driver, undocumented registers */ 541 mii_write(dev, np->phy_addr, 31, 0x0001); 542 mii_write(dev, np->phy_addr, 27, 0x01e0); 543 mii_write(dev, np->phy_addr, 31, 0x0002); 544 mii_write(dev, np->phy_addr, 27, 0xeb8e); 545 mii_write(dev, np->phy_addr, 31, 0x0000); 546 mii_write(dev, np->phy_addr, 30, 0x005e); 547 /* advertise 1000BASE-T half & full duplex, prefer MASTER */ 548 mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700); 549 } 550 551 if (np->phy_media) 552 mii_set_media_pcs(dev); 553 else 554 mii_set_media(dev); 555 556 /* Jumbo frame */ 557 if (np->jumbo != 0) 558 dw16(MaxFrameSize, MAX_JUMBO+14); 559 560 /* Set RFDListPtr */ 561 dw32(RFDListPtr0, np->rx_ring_dma); 562 dw32(RFDListPtr1, 0); 563 564 /* Set station address */ 565 /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works 566 * too. However, it doesn't work on IP1000A so we use 16-bit access. 567 */ 568 for (i = 0; i < 3; i++) 569 dw16(StationAddr0 + 2 * i, 570 cpu_to_le16(((u16 *)dev->dev_addr)[i])); 571 572 set_multicast (dev); 573 if (np->coalesce) { 574 dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16); 575 } 576 /* Set RIO to poll every N*320nsec. */ 577 dw8(RxDMAPollPeriod, 0x20); 578 dw8(TxDMAPollPeriod, 0xff); 579 dw8(RxDMABurstThresh, 0x30); 580 dw8(RxDMAUrgentThresh, 0x30); 581 dw32(RmonStatMask, 0x0007ffff); 582 /* clear statistics */ 583 clear_stats (dev); 584 585 /* VLAN supported */ 586 if (np->vlan) { 587 /* priority field in RxDMAIntCtrl */ 588 dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10); 589 /* VLANId */ 590 dw16(VLANId, np->vlan); 591 /* Length/Type should be 0x8100 */ 592 dw32(VLANTag, 0x8100 << 16 | np->vlan); 593 /* Enable AutoVLANuntagging, but disable AutoVLANtagging. 594 VLAN information tagged by TFC' VID, CFI fields. */ 595 dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging); 596 } 597 598 /* Start Tx/Rx */ 599 dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable); 600 601 macctrl = 0; 602 macctrl |= (np->vlan) ? AutoVLANuntagging : 0; 603 macctrl |= (np->full_duplex) ? DuplexSelect : 0; 604 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; 605 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0; 606 dw16(MACCtrl, macctrl); 607 } 608 609 static void rio_hw_stop(struct net_device *dev) 610 { 611 struct netdev_private *np = netdev_priv(dev); 612 void __iomem *ioaddr = np->ioaddr; 613 614 /* Disable interrupts */ 615 dw16(IntEnable, 0); 616 617 /* Stop Tx and Rx logics */ 618 dw32(MACCtrl, TxDisable | RxDisable | StatsDisable); 619 } 620 621 static int rio_open(struct net_device *dev) 622 { 623 struct netdev_private *np = netdev_priv(dev); 624 const int irq = np->pdev->irq; 625 int i; 626 627 i = alloc_list(dev); 628 if (i) 629 return i; 630 631 rio_hw_init(dev); 632 633 i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev); 634 if (i) { 635 rio_hw_stop(dev); 636 free_list(dev); 637 return i; 638 } 639 640 timer_setup(&np->timer, rio_timer, 0); 641 np->timer.expires = jiffies + 1 * HZ; 642 add_timer(&np->timer); 643 644 netif_start_queue (dev); 645 646 dl2k_enable_int(np); 647 return 0; 648 } 649 650 static void 651 rio_timer (struct timer_list *t) 652 { 653 struct netdev_private *np = from_timer(np, t, timer); 654 struct net_device *dev = pci_get_drvdata(np->pdev); 655 unsigned int entry; 656 int next_tick = 1*HZ; 657 unsigned long flags; 658 659 spin_lock_irqsave(&np->rx_lock, flags); 660 /* Recover rx ring exhausted error */ 661 if (np->cur_rx - np->old_rx >= RX_RING_SIZE) { 662 printk(KERN_INFO "Try to recover rx ring exhausted...\n"); 663 /* Re-allocate skbuffs to fill the descriptor ring */ 664 for (; np->cur_rx - np->old_rx > 0; np->old_rx++) { 665 struct sk_buff *skb; 666 entry = np->old_rx % RX_RING_SIZE; 667 /* Dropped packets don't need to re-allocate */ 668 if (np->rx_skbuff[entry] == NULL) { 669 skb = netdev_alloc_skb_ip_align(dev, 670 np->rx_buf_sz); 671 if (skb == NULL) { 672 np->rx_ring[entry].fraginfo = 0; 673 printk (KERN_INFO 674 "%s: Still unable to re-allocate Rx skbuff.#%d\n", 675 dev->name, entry); 676 break; 677 } 678 np->rx_skbuff[entry] = skb; 679 np->rx_ring[entry].fraginfo = 680 cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data, 681 np->rx_buf_sz, DMA_FROM_DEVICE)); 682 } 683 np->rx_ring[entry].fraginfo |= 684 cpu_to_le64((u64)np->rx_buf_sz << 48); 685 np->rx_ring[entry].status = 0; 686 } /* end for */ 687 } /* end if */ 688 spin_unlock_irqrestore (&np->rx_lock, flags); 689 np->timer.expires = jiffies + next_tick; 690 add_timer(&np->timer); 691 } 692 693 static void 694 rio_tx_timeout (struct net_device *dev, unsigned int txqueue) 695 { 696 struct netdev_private *np = netdev_priv(dev); 697 void __iomem *ioaddr = np->ioaddr; 698 699 printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", 700 dev->name, dr32(TxStatus)); 701 rio_free_tx(dev, 0); 702 dev->if_port = 0; 703 netif_trans_update(dev); /* prevent tx timeout */ 704 } 705 706 static netdev_tx_t 707 start_xmit (struct sk_buff *skb, struct net_device *dev) 708 { 709 struct netdev_private *np = netdev_priv(dev); 710 void __iomem *ioaddr = np->ioaddr; 711 struct netdev_desc *txdesc; 712 unsigned entry; 713 u64 tfc_vlan_tag = 0; 714 715 if (np->link_status == 0) { /* Link Down */ 716 dev_kfree_skb(skb); 717 return NETDEV_TX_OK; 718 } 719 entry = np->cur_tx % TX_RING_SIZE; 720 np->tx_skbuff[entry] = skb; 721 txdesc = &np->tx_ring[entry]; 722 723 #if 0 724 if (skb->ip_summed == CHECKSUM_PARTIAL) { 725 txdesc->status |= 726 cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable | 727 IPChecksumEnable); 728 } 729 #endif 730 if (np->vlan) { 731 tfc_vlan_tag = VLANTagInsert | 732 ((u64)np->vlan << 32) | 733 ((u64)skb->priority << 45); 734 } 735 txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data, 736 skb->len, DMA_TO_DEVICE)); 737 txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48); 738 739 /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode 740 * Work around: Always use 1 descriptor in 10Mbps mode */ 741 if (entry % np->tx_coalesce == 0 || np->speed == 10) 742 txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | 743 WordAlignDisable | 744 TxDMAIndicate | 745 (1 << FragCountShift)); 746 else 747 txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | 748 WordAlignDisable | 749 (1 << FragCountShift)); 750 751 /* TxDMAPollNow */ 752 dw32(DMACtrl, dr32(DMACtrl) | 0x00001000); 753 /* Schedule ISR */ 754 dw32(CountDown, 10000); 755 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; 756 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE 757 < TX_QUEUE_LEN - 1 && np->speed != 10) { 758 /* do nothing */ 759 } else if (!netif_queue_stopped(dev)) { 760 netif_stop_queue (dev); 761 } 762 763 /* The first TFDListPtr */ 764 if (!dr32(TFDListPtr0)) { 765 dw32(TFDListPtr0, np->tx_ring_dma + 766 entry * sizeof (struct netdev_desc)); 767 dw32(TFDListPtr1, 0); 768 } 769 770 return NETDEV_TX_OK; 771 } 772 773 static irqreturn_t 774 rio_interrupt (int irq, void *dev_instance) 775 { 776 struct net_device *dev = dev_instance; 777 struct netdev_private *np = netdev_priv(dev); 778 void __iomem *ioaddr = np->ioaddr; 779 unsigned int_status; 780 int cnt = max_intrloop; 781 int handled = 0; 782 783 while (1) { 784 int_status = dr16(IntStatus); 785 dw16(IntStatus, int_status); 786 int_status &= DEFAULT_INTR; 787 if (int_status == 0 || --cnt < 0) 788 break; 789 handled = 1; 790 /* Processing received packets */ 791 if (int_status & RxDMAComplete) 792 receive_packet (dev); 793 /* TxDMAComplete interrupt */ 794 if ((int_status & (TxDMAComplete|IntRequested))) { 795 int tx_status; 796 tx_status = dr32(TxStatus); 797 if (tx_status & 0x01) 798 tx_error (dev, tx_status); 799 /* Free used tx skbuffs */ 800 rio_free_tx (dev, 1); 801 } 802 803 /* Handle uncommon events */ 804 if (int_status & 805 (HostError | LinkEvent | UpdateStats)) 806 rio_error (dev, int_status); 807 } 808 if (np->cur_tx != np->old_tx) 809 dw32(CountDown, 100); 810 return IRQ_RETVAL(handled); 811 } 812 813 static void 814 rio_free_tx (struct net_device *dev, int irq) 815 { 816 struct netdev_private *np = netdev_priv(dev); 817 int entry = np->old_tx % TX_RING_SIZE; 818 int tx_use = 0; 819 unsigned long flag = 0; 820 821 if (irq) 822 spin_lock(&np->tx_lock); 823 else 824 spin_lock_irqsave(&np->tx_lock, flag); 825 826 /* Free used tx skbuffs */ 827 while (entry != np->cur_tx) { 828 struct sk_buff *skb; 829 830 if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone))) 831 break; 832 skb = np->tx_skbuff[entry]; 833 dma_unmap_single(&np->pdev->dev, 834 desc_to_dma(&np->tx_ring[entry]), skb->len, 835 DMA_TO_DEVICE); 836 if (irq) 837 dev_consume_skb_irq(skb); 838 else 839 dev_kfree_skb(skb); 840 841 np->tx_skbuff[entry] = NULL; 842 entry = (entry + 1) % TX_RING_SIZE; 843 tx_use++; 844 } 845 if (irq) 846 spin_unlock(&np->tx_lock); 847 else 848 spin_unlock_irqrestore(&np->tx_lock, flag); 849 np->old_tx = entry; 850 851 /* If the ring is no longer full, clear tx_full and 852 call netif_wake_queue() */ 853 854 if (netif_queue_stopped(dev) && 855 ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE 856 < TX_QUEUE_LEN - 1 || np->speed == 10)) { 857 netif_wake_queue (dev); 858 } 859 } 860 861 static void 862 tx_error (struct net_device *dev, int tx_status) 863 { 864 struct netdev_private *np = netdev_priv(dev); 865 void __iomem *ioaddr = np->ioaddr; 866 int frame_id; 867 int i; 868 869 frame_id = (tx_status & 0xffff0000); 870 printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", 871 dev->name, tx_status, frame_id); 872 dev->stats.tx_errors++; 873 /* Ttransmit Underrun */ 874 if (tx_status & 0x10) { 875 dev->stats.tx_fifo_errors++; 876 dw16(TxStartThresh, dr16(TxStartThresh) + 0x10); 877 /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ 878 dw16(ASICCtrl + 2, 879 TxReset | DMAReset | FIFOReset | NetworkReset); 880 /* Wait for ResetBusy bit clear */ 881 for (i = 50; i > 0; i--) { 882 if (!(dr16(ASICCtrl + 2) & ResetBusy)) 883 break; 884 mdelay (1); 885 } 886 rio_set_led_mode(dev); 887 rio_free_tx (dev, 1); 888 /* Reset TFDListPtr */ 889 dw32(TFDListPtr0, np->tx_ring_dma + 890 np->old_tx * sizeof (struct netdev_desc)); 891 dw32(TFDListPtr1, 0); 892 893 /* Let TxStartThresh stay default value */ 894 } 895 /* Late Collision */ 896 if (tx_status & 0x04) { 897 dev->stats.tx_fifo_errors++; 898 /* TxReset and clear FIFO */ 899 dw16(ASICCtrl + 2, TxReset | FIFOReset); 900 /* Wait reset done */ 901 for (i = 50; i > 0; i--) { 902 if (!(dr16(ASICCtrl + 2) & ResetBusy)) 903 break; 904 mdelay (1); 905 } 906 rio_set_led_mode(dev); 907 /* Let TxStartThresh stay default value */ 908 } 909 /* Maximum Collisions */ 910 if (tx_status & 0x08) 911 dev->stats.collisions++; 912 /* Restart the Tx */ 913 dw32(MACCtrl, dr16(MACCtrl) | TxEnable); 914 } 915 916 static int 917 receive_packet (struct net_device *dev) 918 { 919 struct netdev_private *np = netdev_priv(dev); 920 int entry = np->cur_rx % RX_RING_SIZE; 921 int cnt = 30; 922 923 /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */ 924 while (1) { 925 struct netdev_desc *desc = &np->rx_ring[entry]; 926 int pkt_len; 927 u64 frame_status; 928 929 if (!(desc->status & cpu_to_le64(RFDDone)) || 930 !(desc->status & cpu_to_le64(FrameStart)) || 931 !(desc->status & cpu_to_le64(FrameEnd))) 932 break; 933 934 /* Chip omits the CRC. */ 935 frame_status = le64_to_cpu(desc->status); 936 pkt_len = frame_status & 0xffff; 937 if (--cnt < 0) 938 break; 939 /* Update rx error statistics, drop packet. */ 940 if (frame_status & RFS_Errors) { 941 dev->stats.rx_errors++; 942 if (frame_status & (RxRuntFrame | RxLengthError)) 943 dev->stats.rx_length_errors++; 944 if (frame_status & RxFCSError) 945 dev->stats.rx_crc_errors++; 946 if (frame_status & RxAlignmentError && np->speed != 1000) 947 dev->stats.rx_frame_errors++; 948 if (frame_status & RxFIFOOverrun) 949 dev->stats.rx_fifo_errors++; 950 } else { 951 struct sk_buff *skb; 952 953 /* Small skbuffs for short packets */ 954 if (pkt_len > copy_thresh) { 955 dma_unmap_single(&np->pdev->dev, 956 desc_to_dma(desc), 957 np->rx_buf_sz, 958 DMA_FROM_DEVICE); 959 skb_put (skb = np->rx_skbuff[entry], pkt_len); 960 np->rx_skbuff[entry] = NULL; 961 } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { 962 dma_sync_single_for_cpu(&np->pdev->dev, 963 desc_to_dma(desc), 964 np->rx_buf_sz, 965 DMA_FROM_DEVICE); 966 skb_copy_to_linear_data (skb, 967 np->rx_skbuff[entry]->data, 968 pkt_len); 969 skb_put (skb, pkt_len); 970 dma_sync_single_for_device(&np->pdev->dev, 971 desc_to_dma(desc), 972 np->rx_buf_sz, 973 DMA_FROM_DEVICE); 974 } 975 skb->protocol = eth_type_trans (skb, dev); 976 #if 0 977 /* Checksum done by hw, but csum value unavailable. */ 978 if (np->pdev->pci_rev_id >= 0x0c && 979 !(frame_status & (TCPError | UDPError | IPError))) { 980 skb->ip_summed = CHECKSUM_UNNECESSARY; 981 } 982 #endif 983 netif_rx (skb); 984 } 985 entry = (entry + 1) % RX_RING_SIZE; 986 } 987 spin_lock(&np->rx_lock); 988 np->cur_rx = entry; 989 /* Re-allocate skbuffs to fill the descriptor ring */ 990 entry = np->old_rx; 991 while (entry != np->cur_rx) { 992 struct sk_buff *skb; 993 /* Dropped packets don't need to re-allocate */ 994 if (np->rx_skbuff[entry] == NULL) { 995 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); 996 if (skb == NULL) { 997 np->rx_ring[entry].fraginfo = 0; 998 printk (KERN_INFO 999 "%s: receive_packet: " 1000 "Unable to re-allocate Rx skbuff.#%d\n", 1001 dev->name, entry); 1002 break; 1003 } 1004 np->rx_skbuff[entry] = skb; 1005 np->rx_ring[entry].fraginfo = 1006 cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data, 1007 np->rx_buf_sz, DMA_FROM_DEVICE)); 1008 } 1009 np->rx_ring[entry].fraginfo |= 1010 cpu_to_le64((u64)np->rx_buf_sz << 48); 1011 np->rx_ring[entry].status = 0; 1012 entry = (entry + 1) % RX_RING_SIZE; 1013 } 1014 np->old_rx = entry; 1015 spin_unlock(&np->rx_lock); 1016 return 0; 1017 } 1018 1019 static void 1020 rio_error (struct net_device *dev, int int_status) 1021 { 1022 struct netdev_private *np = netdev_priv(dev); 1023 void __iomem *ioaddr = np->ioaddr; 1024 u16 macctrl; 1025 1026 /* Link change event */ 1027 if (int_status & LinkEvent) { 1028 if (mii_wait_link (dev, 10) == 0) { 1029 printk (KERN_INFO "%s: Link up\n", dev->name); 1030 if (np->phy_media) 1031 mii_get_media_pcs (dev); 1032 else 1033 mii_get_media (dev); 1034 if (np->speed == 1000) 1035 np->tx_coalesce = tx_coalesce; 1036 else 1037 np->tx_coalesce = 1; 1038 macctrl = 0; 1039 macctrl |= (np->vlan) ? AutoVLANuntagging : 0; 1040 macctrl |= (np->full_duplex) ? DuplexSelect : 0; 1041 macctrl |= (np->tx_flow) ? 1042 TxFlowControlEnable : 0; 1043 macctrl |= (np->rx_flow) ? 1044 RxFlowControlEnable : 0; 1045 dw16(MACCtrl, macctrl); 1046 np->link_status = 1; 1047 netif_carrier_on(dev); 1048 } else { 1049 printk (KERN_INFO "%s: Link off\n", dev->name); 1050 np->link_status = 0; 1051 netif_carrier_off(dev); 1052 } 1053 } 1054 1055 /* UpdateStats statistics registers */ 1056 if (int_status & UpdateStats) { 1057 get_stats (dev); 1058 } 1059 1060 /* PCI Error, a catastronphic error related to the bus interface 1061 occurs, set GlobalReset and HostReset to reset. */ 1062 if (int_status & HostError) { 1063 printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n", 1064 dev->name, int_status); 1065 dw16(ASICCtrl + 2, GlobalReset | HostReset); 1066 mdelay (500); 1067 rio_set_led_mode(dev); 1068 } 1069 } 1070 1071 static struct net_device_stats * 1072 get_stats (struct net_device *dev) 1073 { 1074 struct netdev_private *np = netdev_priv(dev); 1075 void __iomem *ioaddr = np->ioaddr; 1076 #ifdef MEM_MAPPING 1077 int i; 1078 #endif 1079 unsigned int stat_reg; 1080 1081 /* All statistics registers need to be acknowledged, 1082 else statistic overflow could cause problems */ 1083 1084 dev->stats.rx_packets += dr32(FramesRcvOk); 1085 dev->stats.tx_packets += dr32(FramesXmtOk); 1086 dev->stats.rx_bytes += dr32(OctetRcvOk); 1087 dev->stats.tx_bytes += dr32(OctetXmtOk); 1088 1089 dev->stats.multicast = dr32(McstFramesRcvdOk); 1090 dev->stats.collisions += dr32(SingleColFrames) 1091 + dr32(MultiColFrames); 1092 1093 /* detailed tx errors */ 1094 stat_reg = dr16(FramesAbortXSColls); 1095 dev->stats.tx_aborted_errors += stat_reg; 1096 dev->stats.tx_errors += stat_reg; 1097 1098 stat_reg = dr16(CarrierSenseErrors); 1099 dev->stats.tx_carrier_errors += stat_reg; 1100 dev->stats.tx_errors += stat_reg; 1101 1102 /* Clear all other statistic register. */ 1103 dr32(McstOctetXmtOk); 1104 dr16(BcstFramesXmtdOk); 1105 dr32(McstFramesXmtdOk); 1106 dr16(BcstFramesRcvdOk); 1107 dr16(MacControlFramesRcvd); 1108 dr16(FrameTooLongErrors); 1109 dr16(InRangeLengthErrors); 1110 dr16(FramesCheckSeqErrors); 1111 dr16(FramesLostRxErrors); 1112 dr32(McstOctetXmtOk); 1113 dr32(BcstOctetXmtOk); 1114 dr32(McstFramesXmtdOk); 1115 dr32(FramesWDeferredXmt); 1116 dr32(LateCollisions); 1117 dr16(BcstFramesXmtdOk); 1118 dr16(MacControlFramesXmtd); 1119 dr16(FramesWEXDeferal); 1120 1121 #ifdef MEM_MAPPING 1122 for (i = 0x100; i <= 0x150; i += 4) 1123 dr32(i); 1124 #endif 1125 dr16(TxJumboFrames); 1126 dr16(RxJumboFrames); 1127 dr16(TCPCheckSumErrors); 1128 dr16(UDPCheckSumErrors); 1129 dr16(IPCheckSumErrors); 1130 return &dev->stats; 1131 } 1132 1133 static int 1134 clear_stats (struct net_device *dev) 1135 { 1136 struct netdev_private *np = netdev_priv(dev); 1137 void __iomem *ioaddr = np->ioaddr; 1138 #ifdef MEM_MAPPING 1139 int i; 1140 #endif 1141 1142 /* All statistics registers need to be acknowledged, 1143 else statistic overflow could cause problems */ 1144 dr32(FramesRcvOk); 1145 dr32(FramesXmtOk); 1146 dr32(OctetRcvOk); 1147 dr32(OctetXmtOk); 1148 1149 dr32(McstFramesRcvdOk); 1150 dr32(SingleColFrames); 1151 dr32(MultiColFrames); 1152 dr32(LateCollisions); 1153 /* detailed rx errors */ 1154 dr16(FrameTooLongErrors); 1155 dr16(InRangeLengthErrors); 1156 dr16(FramesCheckSeqErrors); 1157 dr16(FramesLostRxErrors); 1158 1159 /* detailed tx errors */ 1160 dr16(FramesAbortXSColls); 1161 dr16(CarrierSenseErrors); 1162 1163 /* Clear all other statistic register. */ 1164 dr32(McstOctetXmtOk); 1165 dr16(BcstFramesXmtdOk); 1166 dr32(McstFramesXmtdOk); 1167 dr16(BcstFramesRcvdOk); 1168 dr16(MacControlFramesRcvd); 1169 dr32(McstOctetXmtOk); 1170 dr32(BcstOctetXmtOk); 1171 dr32(McstFramesXmtdOk); 1172 dr32(FramesWDeferredXmt); 1173 dr16(BcstFramesXmtdOk); 1174 dr16(MacControlFramesXmtd); 1175 dr16(FramesWEXDeferal); 1176 #ifdef MEM_MAPPING 1177 for (i = 0x100; i <= 0x150; i += 4) 1178 dr32(i); 1179 #endif 1180 dr16(TxJumboFrames); 1181 dr16(RxJumboFrames); 1182 dr16(TCPCheckSumErrors); 1183 dr16(UDPCheckSumErrors); 1184 dr16(IPCheckSumErrors); 1185 return 0; 1186 } 1187 1188 static void 1189 set_multicast (struct net_device *dev) 1190 { 1191 struct netdev_private *np = netdev_priv(dev); 1192 void __iomem *ioaddr = np->ioaddr; 1193 u32 hash_table[2]; 1194 u16 rx_mode = 0; 1195 1196 hash_table[0] = hash_table[1] = 0; 1197 /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */ 1198 hash_table[1] |= 0x02000000; 1199 if (dev->flags & IFF_PROMISC) { 1200 /* Receive all frames promiscuously. */ 1201 rx_mode = ReceiveAllFrames; 1202 } else if ((dev->flags & IFF_ALLMULTI) || 1203 (netdev_mc_count(dev) > multicast_filter_limit)) { 1204 /* Receive broadcast and multicast frames */ 1205 rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; 1206 } else if (!netdev_mc_empty(dev)) { 1207 struct netdev_hw_addr *ha; 1208 /* Receive broadcast frames and multicast frames filtering 1209 by Hashtable */ 1210 rx_mode = 1211 ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; 1212 netdev_for_each_mc_addr(ha, dev) { 1213 int bit, index = 0; 1214 int crc = ether_crc_le(ETH_ALEN, ha->addr); 1215 /* The inverted high significant 6 bits of CRC are 1216 used as an index to hashtable */ 1217 for (bit = 0; bit < 6; bit++) 1218 if (crc & (1 << (31 - bit))) 1219 index |= (1 << bit); 1220 hash_table[index / 32] |= (1 << (index % 32)); 1221 } 1222 } else { 1223 rx_mode = ReceiveBroadcast | ReceiveUnicast; 1224 } 1225 if (np->vlan) { 1226 /* ReceiveVLANMatch field in ReceiveMode */ 1227 rx_mode |= ReceiveVLANMatch; 1228 } 1229 1230 dw32(HashTable0, hash_table[0]); 1231 dw32(HashTable1, hash_table[1]); 1232 dw16(ReceiveMode, rx_mode); 1233 } 1234 1235 static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1236 { 1237 struct netdev_private *np = netdev_priv(dev); 1238 1239 strlcpy(info->driver, "dl2k", sizeof(info->driver)); 1240 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); 1241 } 1242 1243 static int rio_get_link_ksettings(struct net_device *dev, 1244 struct ethtool_link_ksettings *cmd) 1245 { 1246 struct netdev_private *np = netdev_priv(dev); 1247 u32 supported, advertising; 1248 1249 if (np->phy_media) { 1250 /* fiber device */ 1251 supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; 1252 advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE; 1253 cmd->base.port = PORT_FIBRE; 1254 } else { 1255 /* copper device */ 1256 supported = SUPPORTED_10baseT_Half | 1257 SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half 1258 | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | 1259 SUPPORTED_Autoneg | SUPPORTED_MII; 1260 advertising = ADVERTISED_10baseT_Half | 1261 ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | 1262 ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | 1263 ADVERTISED_Autoneg | ADVERTISED_MII; 1264 cmd->base.port = PORT_MII; 1265 } 1266 if (np->link_status) { 1267 cmd->base.speed = np->speed; 1268 cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 1269 } else { 1270 cmd->base.speed = SPEED_UNKNOWN; 1271 cmd->base.duplex = DUPLEX_UNKNOWN; 1272 } 1273 if (np->an_enable) 1274 cmd->base.autoneg = AUTONEG_ENABLE; 1275 else 1276 cmd->base.autoneg = AUTONEG_DISABLE; 1277 1278 cmd->base.phy_address = np->phy_addr; 1279 1280 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 1281 supported); 1282 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 1283 advertising); 1284 1285 return 0; 1286 } 1287 1288 static int rio_set_link_ksettings(struct net_device *dev, 1289 const struct ethtool_link_ksettings *cmd) 1290 { 1291 struct netdev_private *np = netdev_priv(dev); 1292 u32 speed = cmd->base.speed; 1293 u8 duplex = cmd->base.duplex; 1294 1295 netif_carrier_off(dev); 1296 if (cmd->base.autoneg == AUTONEG_ENABLE) { 1297 if (np->an_enable) { 1298 return 0; 1299 } else { 1300 np->an_enable = 1; 1301 mii_set_media(dev); 1302 return 0; 1303 } 1304 } else { 1305 np->an_enable = 0; 1306 if (np->speed == 1000) { 1307 speed = SPEED_100; 1308 duplex = DUPLEX_FULL; 1309 printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n"); 1310 } 1311 switch (speed) { 1312 case SPEED_10: 1313 np->speed = 10; 1314 np->full_duplex = (duplex == DUPLEX_FULL); 1315 break; 1316 case SPEED_100: 1317 np->speed = 100; 1318 np->full_duplex = (duplex == DUPLEX_FULL); 1319 break; 1320 case SPEED_1000: /* not supported */ 1321 default: 1322 return -EINVAL; 1323 } 1324 mii_set_media(dev); 1325 } 1326 return 0; 1327 } 1328 1329 static u32 rio_get_link(struct net_device *dev) 1330 { 1331 struct netdev_private *np = netdev_priv(dev); 1332 return np->link_status; 1333 } 1334 1335 static const struct ethtool_ops ethtool_ops = { 1336 .get_drvinfo = rio_get_drvinfo, 1337 .get_link = rio_get_link, 1338 .get_link_ksettings = rio_get_link_ksettings, 1339 .set_link_ksettings = rio_set_link_ksettings, 1340 }; 1341 1342 static int 1343 rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) 1344 { 1345 int phy_addr; 1346 struct netdev_private *np = netdev_priv(dev); 1347 struct mii_ioctl_data *miidata = if_mii(rq); 1348 1349 phy_addr = np->phy_addr; 1350 switch (cmd) { 1351 case SIOCGMIIPHY: 1352 miidata->phy_id = phy_addr; 1353 break; 1354 case SIOCGMIIREG: 1355 miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num); 1356 break; 1357 case SIOCSMIIREG: 1358 if (!capable(CAP_NET_ADMIN)) 1359 return -EPERM; 1360 mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in); 1361 break; 1362 default: 1363 return -EOPNOTSUPP; 1364 } 1365 return 0; 1366 } 1367 1368 #define EEP_READ 0x0200 1369 #define EEP_BUSY 0x8000 1370 /* Read the EEPROM word */ 1371 /* We use I/O instruction to read/write eeprom to avoid fail on some machines */ 1372 static int read_eeprom(struct netdev_private *np, int eep_addr) 1373 { 1374 void __iomem *ioaddr = np->eeprom_addr; 1375 int i = 1000; 1376 1377 dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff)); 1378 while (i-- > 0) { 1379 if (!(dr16(EepromCtrl) & EEP_BUSY)) 1380 return dr16(EepromData); 1381 } 1382 return 0; 1383 } 1384 1385 enum phy_ctrl_bits { 1386 MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04, 1387 MII_DUPLEX = 0x08, 1388 }; 1389 1390 #define mii_delay() dr8(PhyCtrl) 1391 static void 1392 mii_sendbit (struct net_device *dev, u32 data) 1393 { 1394 struct netdev_private *np = netdev_priv(dev); 1395 void __iomem *ioaddr = np->ioaddr; 1396 1397 data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE; 1398 dw8(PhyCtrl, data); 1399 mii_delay (); 1400 dw8(PhyCtrl, data | MII_CLK); 1401 mii_delay (); 1402 } 1403 1404 static int 1405 mii_getbit (struct net_device *dev) 1406 { 1407 struct netdev_private *np = netdev_priv(dev); 1408 void __iomem *ioaddr = np->ioaddr; 1409 u8 data; 1410 1411 data = (dr8(PhyCtrl) & 0xf8) | MII_READ; 1412 dw8(PhyCtrl, data); 1413 mii_delay (); 1414 dw8(PhyCtrl, data | MII_CLK); 1415 mii_delay (); 1416 return (dr8(PhyCtrl) >> 1) & 1; 1417 } 1418 1419 static void 1420 mii_send_bits (struct net_device *dev, u32 data, int len) 1421 { 1422 int i; 1423 1424 for (i = len - 1; i >= 0; i--) { 1425 mii_sendbit (dev, data & (1 << i)); 1426 } 1427 } 1428 1429 static int 1430 mii_read (struct net_device *dev, int phy_addr, int reg_num) 1431 { 1432 u32 cmd; 1433 int i; 1434 u32 retval = 0; 1435 1436 /* Preamble */ 1437 mii_send_bits (dev, 0xffffffff, 32); 1438 /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ 1439 /* ST,OP = 0110'b for read operation */ 1440 cmd = (0x06 << 10 | phy_addr << 5 | reg_num); 1441 mii_send_bits (dev, cmd, 14); 1442 /* Turnaround */ 1443 if (mii_getbit (dev)) 1444 goto err_out; 1445 /* Read data */ 1446 for (i = 0; i < 16; i++) { 1447 retval |= mii_getbit (dev); 1448 retval <<= 1; 1449 } 1450 /* End cycle */ 1451 mii_getbit (dev); 1452 return (retval >> 1) & 0xffff; 1453 1454 err_out: 1455 return 0; 1456 } 1457 static int 1458 mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data) 1459 { 1460 u32 cmd; 1461 1462 /* Preamble */ 1463 mii_send_bits (dev, 0xffffffff, 32); 1464 /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ 1465 /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */ 1466 cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data; 1467 mii_send_bits (dev, cmd, 32); 1468 /* End cycle */ 1469 mii_getbit (dev); 1470 return 0; 1471 } 1472 static int 1473 mii_wait_link (struct net_device *dev, int wait) 1474 { 1475 __u16 bmsr; 1476 int phy_addr; 1477 struct netdev_private *np; 1478 1479 np = netdev_priv(dev); 1480 phy_addr = np->phy_addr; 1481 1482 do { 1483 bmsr = mii_read (dev, phy_addr, MII_BMSR); 1484 if (bmsr & BMSR_LSTATUS) 1485 return 0; 1486 mdelay (1); 1487 } while (--wait > 0); 1488 return -1; 1489 } 1490 static int 1491 mii_get_media (struct net_device *dev) 1492 { 1493 __u16 negotiate; 1494 __u16 bmsr; 1495 __u16 mscr; 1496 __u16 mssr; 1497 int phy_addr; 1498 struct netdev_private *np; 1499 1500 np = netdev_priv(dev); 1501 phy_addr = np->phy_addr; 1502 1503 bmsr = mii_read (dev, phy_addr, MII_BMSR); 1504 if (np->an_enable) { 1505 if (!(bmsr & BMSR_ANEGCOMPLETE)) { 1506 /* Auto-Negotiation not completed */ 1507 return -1; 1508 } 1509 negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) & 1510 mii_read (dev, phy_addr, MII_LPA); 1511 mscr = mii_read (dev, phy_addr, MII_CTRL1000); 1512 mssr = mii_read (dev, phy_addr, MII_STAT1000); 1513 if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) { 1514 np->speed = 1000; 1515 np->full_duplex = 1; 1516 printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); 1517 } else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) { 1518 np->speed = 1000; 1519 np->full_duplex = 0; 1520 printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n"); 1521 } else if (negotiate & ADVERTISE_100FULL) { 1522 np->speed = 100; 1523 np->full_duplex = 1; 1524 printk (KERN_INFO "Auto 100 Mbps, Full duplex\n"); 1525 } else if (negotiate & ADVERTISE_100HALF) { 1526 np->speed = 100; 1527 np->full_duplex = 0; 1528 printk (KERN_INFO "Auto 100 Mbps, Half duplex\n"); 1529 } else if (negotiate & ADVERTISE_10FULL) { 1530 np->speed = 10; 1531 np->full_duplex = 1; 1532 printk (KERN_INFO "Auto 10 Mbps, Full duplex\n"); 1533 } else if (negotiate & ADVERTISE_10HALF) { 1534 np->speed = 10; 1535 np->full_duplex = 0; 1536 printk (KERN_INFO "Auto 10 Mbps, Half duplex\n"); 1537 } 1538 if (negotiate & ADVERTISE_PAUSE_CAP) { 1539 np->tx_flow &= 1; 1540 np->rx_flow &= 1; 1541 } else if (negotiate & ADVERTISE_PAUSE_ASYM) { 1542 np->tx_flow = 0; 1543 np->rx_flow &= 1; 1544 } 1545 /* else tx_flow, rx_flow = user select */ 1546 } else { 1547 __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR); 1548 switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) { 1549 case BMCR_SPEED1000: 1550 printk (KERN_INFO "Operating at 1000 Mbps, "); 1551 break; 1552 case BMCR_SPEED100: 1553 printk (KERN_INFO "Operating at 100 Mbps, "); 1554 break; 1555 case 0: 1556 printk (KERN_INFO "Operating at 10 Mbps, "); 1557 } 1558 if (bmcr & BMCR_FULLDPLX) { 1559 printk (KERN_CONT "Full duplex\n"); 1560 } else { 1561 printk (KERN_CONT "Half duplex\n"); 1562 } 1563 } 1564 if (np->tx_flow) 1565 printk(KERN_INFO "Enable Tx Flow Control\n"); 1566 else 1567 printk(KERN_INFO "Disable Tx Flow Control\n"); 1568 if (np->rx_flow) 1569 printk(KERN_INFO "Enable Rx Flow Control\n"); 1570 else 1571 printk(KERN_INFO "Disable Rx Flow Control\n"); 1572 1573 return 0; 1574 } 1575 1576 static int 1577 mii_set_media (struct net_device *dev) 1578 { 1579 __u16 pscr; 1580 __u16 bmcr; 1581 __u16 bmsr; 1582 __u16 anar; 1583 int phy_addr; 1584 struct netdev_private *np; 1585 np = netdev_priv(dev); 1586 phy_addr = np->phy_addr; 1587 1588 /* Does user set speed? */ 1589 if (np->an_enable) { 1590 /* Advertise capabilities */ 1591 bmsr = mii_read (dev, phy_addr, MII_BMSR); 1592 anar = mii_read (dev, phy_addr, MII_ADVERTISE) & 1593 ~(ADVERTISE_100FULL | ADVERTISE_10FULL | 1594 ADVERTISE_100HALF | ADVERTISE_10HALF | 1595 ADVERTISE_100BASE4); 1596 if (bmsr & BMSR_100FULL) 1597 anar |= ADVERTISE_100FULL; 1598 if (bmsr & BMSR_100HALF) 1599 anar |= ADVERTISE_100HALF; 1600 if (bmsr & BMSR_100BASE4) 1601 anar |= ADVERTISE_100BASE4; 1602 if (bmsr & BMSR_10FULL) 1603 anar |= ADVERTISE_10FULL; 1604 if (bmsr & BMSR_10HALF) 1605 anar |= ADVERTISE_10HALF; 1606 anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1607 mii_write (dev, phy_addr, MII_ADVERTISE, anar); 1608 1609 /* Enable Auto crossover */ 1610 pscr = mii_read (dev, phy_addr, MII_PHY_SCR); 1611 pscr |= 3 << 5; /* 11'b */ 1612 mii_write (dev, phy_addr, MII_PHY_SCR, pscr); 1613 1614 /* Soft reset PHY */ 1615 mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET); 1616 bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET; 1617 mii_write (dev, phy_addr, MII_BMCR, bmcr); 1618 mdelay(1); 1619 } else { 1620 /* Force speed setting */ 1621 /* 1) Disable Auto crossover */ 1622 pscr = mii_read (dev, phy_addr, MII_PHY_SCR); 1623 pscr &= ~(3 << 5); 1624 mii_write (dev, phy_addr, MII_PHY_SCR, pscr); 1625 1626 /* 2) PHY Reset */ 1627 bmcr = mii_read (dev, phy_addr, MII_BMCR); 1628 bmcr |= BMCR_RESET; 1629 mii_write (dev, phy_addr, MII_BMCR, bmcr); 1630 1631 /* 3) Power Down */ 1632 bmcr = 0x1940; /* must be 0x1940 */ 1633 mii_write (dev, phy_addr, MII_BMCR, bmcr); 1634 mdelay (100); /* wait a certain time */ 1635 1636 /* 4) Advertise nothing */ 1637 mii_write (dev, phy_addr, MII_ADVERTISE, 0); 1638 1639 /* 5) Set media and Power Up */ 1640 bmcr = BMCR_PDOWN; 1641 if (np->speed == 100) { 1642 bmcr |= BMCR_SPEED100; 1643 printk (KERN_INFO "Manual 100 Mbps, "); 1644 } else if (np->speed == 10) { 1645 printk (KERN_INFO "Manual 10 Mbps, "); 1646 } 1647 if (np->full_duplex) { 1648 bmcr |= BMCR_FULLDPLX; 1649 printk (KERN_CONT "Full duplex\n"); 1650 } else { 1651 printk (KERN_CONT "Half duplex\n"); 1652 } 1653 #if 0 1654 /* Set 1000BaseT Master/Slave setting */ 1655 mscr = mii_read (dev, phy_addr, MII_CTRL1000); 1656 mscr |= MII_MSCR_CFG_ENABLE; 1657 mscr &= ~MII_MSCR_CFG_VALUE = 0; 1658 #endif 1659 mii_write (dev, phy_addr, MII_BMCR, bmcr); 1660 mdelay(10); 1661 } 1662 return 0; 1663 } 1664 1665 static int 1666 mii_get_media_pcs (struct net_device *dev) 1667 { 1668 __u16 negotiate; 1669 __u16 bmsr; 1670 int phy_addr; 1671 struct netdev_private *np; 1672 1673 np = netdev_priv(dev); 1674 phy_addr = np->phy_addr; 1675 1676 bmsr = mii_read (dev, phy_addr, PCS_BMSR); 1677 if (np->an_enable) { 1678 if (!(bmsr & BMSR_ANEGCOMPLETE)) { 1679 /* Auto-Negotiation not completed */ 1680 return -1; 1681 } 1682 negotiate = mii_read (dev, phy_addr, PCS_ANAR) & 1683 mii_read (dev, phy_addr, PCS_ANLPAR); 1684 np->speed = 1000; 1685 if (negotiate & PCS_ANAR_FULL_DUPLEX) { 1686 printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); 1687 np->full_duplex = 1; 1688 } else { 1689 printk (KERN_INFO "Auto 1000 Mbps, half duplex\n"); 1690 np->full_duplex = 0; 1691 } 1692 if (negotiate & PCS_ANAR_PAUSE) { 1693 np->tx_flow &= 1; 1694 np->rx_flow &= 1; 1695 } else if (negotiate & PCS_ANAR_ASYMMETRIC) { 1696 np->tx_flow = 0; 1697 np->rx_flow &= 1; 1698 } 1699 /* else tx_flow, rx_flow = user select */ 1700 } else { 1701 __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR); 1702 printk (KERN_INFO "Operating at 1000 Mbps, "); 1703 if (bmcr & BMCR_FULLDPLX) { 1704 printk (KERN_CONT "Full duplex\n"); 1705 } else { 1706 printk (KERN_CONT "Half duplex\n"); 1707 } 1708 } 1709 if (np->tx_flow) 1710 printk(KERN_INFO "Enable Tx Flow Control\n"); 1711 else 1712 printk(KERN_INFO "Disable Tx Flow Control\n"); 1713 if (np->rx_flow) 1714 printk(KERN_INFO "Enable Rx Flow Control\n"); 1715 else 1716 printk(KERN_INFO "Disable Rx Flow Control\n"); 1717 1718 return 0; 1719 } 1720 1721 static int 1722 mii_set_media_pcs (struct net_device *dev) 1723 { 1724 __u16 bmcr; 1725 __u16 esr; 1726 __u16 anar; 1727 int phy_addr; 1728 struct netdev_private *np; 1729 np = netdev_priv(dev); 1730 phy_addr = np->phy_addr; 1731 1732 /* Auto-Negotiation? */ 1733 if (np->an_enable) { 1734 /* Advertise capabilities */ 1735 esr = mii_read (dev, phy_addr, PCS_ESR); 1736 anar = mii_read (dev, phy_addr, MII_ADVERTISE) & 1737 ~PCS_ANAR_HALF_DUPLEX & 1738 ~PCS_ANAR_FULL_DUPLEX; 1739 if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD)) 1740 anar |= PCS_ANAR_HALF_DUPLEX; 1741 if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD)) 1742 anar |= PCS_ANAR_FULL_DUPLEX; 1743 anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC; 1744 mii_write (dev, phy_addr, MII_ADVERTISE, anar); 1745 1746 /* Soft reset PHY */ 1747 mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET); 1748 bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET; 1749 mii_write (dev, phy_addr, MII_BMCR, bmcr); 1750 mdelay(1); 1751 } else { 1752 /* Force speed setting */ 1753 /* PHY Reset */ 1754 bmcr = BMCR_RESET; 1755 mii_write (dev, phy_addr, MII_BMCR, bmcr); 1756 mdelay(10); 1757 if (np->full_duplex) { 1758 bmcr = BMCR_FULLDPLX; 1759 printk (KERN_INFO "Manual full duplex\n"); 1760 } else { 1761 bmcr = 0; 1762 printk (KERN_INFO "Manual half duplex\n"); 1763 } 1764 mii_write (dev, phy_addr, MII_BMCR, bmcr); 1765 mdelay(10); 1766 1767 /* Advertise nothing */ 1768 mii_write (dev, phy_addr, MII_ADVERTISE, 0); 1769 } 1770 return 0; 1771 } 1772 1773 1774 static int 1775 rio_close (struct net_device *dev) 1776 { 1777 struct netdev_private *np = netdev_priv(dev); 1778 struct pci_dev *pdev = np->pdev; 1779 1780 netif_stop_queue (dev); 1781 1782 rio_hw_stop(dev); 1783 1784 free_irq(pdev->irq, dev); 1785 del_timer_sync (&np->timer); 1786 1787 free_list(dev); 1788 1789 return 0; 1790 } 1791 1792 static void 1793 rio_remove1 (struct pci_dev *pdev) 1794 { 1795 struct net_device *dev = pci_get_drvdata (pdev); 1796 1797 if (dev) { 1798 struct netdev_private *np = netdev_priv(dev); 1799 1800 unregister_netdev (dev); 1801 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, 1802 np->rx_ring_dma); 1803 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, 1804 np->tx_ring_dma); 1805 #ifdef MEM_MAPPING 1806 pci_iounmap(pdev, np->ioaddr); 1807 #endif 1808 pci_iounmap(pdev, np->eeprom_addr); 1809 free_netdev (dev); 1810 pci_release_regions (pdev); 1811 pci_disable_device (pdev); 1812 } 1813 } 1814 1815 #ifdef CONFIG_PM_SLEEP 1816 static int rio_suspend(struct device *device) 1817 { 1818 struct net_device *dev = dev_get_drvdata(device); 1819 struct netdev_private *np = netdev_priv(dev); 1820 1821 if (!netif_running(dev)) 1822 return 0; 1823 1824 netif_device_detach(dev); 1825 del_timer_sync(&np->timer); 1826 rio_hw_stop(dev); 1827 1828 return 0; 1829 } 1830 1831 static int rio_resume(struct device *device) 1832 { 1833 struct net_device *dev = dev_get_drvdata(device); 1834 struct netdev_private *np = netdev_priv(dev); 1835 1836 if (!netif_running(dev)) 1837 return 0; 1838 1839 rio_reset_ring(np); 1840 rio_hw_init(dev); 1841 np->timer.expires = jiffies + 1 * HZ; 1842 add_timer(&np->timer); 1843 netif_device_attach(dev); 1844 dl2k_enable_int(np); 1845 1846 return 0; 1847 } 1848 1849 static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume); 1850 #define RIO_PM_OPS (&rio_pm_ops) 1851 1852 #else 1853 1854 #define RIO_PM_OPS NULL 1855 1856 #endif /* CONFIG_PM_SLEEP */ 1857 1858 static struct pci_driver rio_driver = { 1859 .name = "dl2k", 1860 .id_table = rio_pci_tbl, 1861 .probe = rio_probe1, 1862 .remove = rio_remove1, 1863 .driver.pm = RIO_PM_OPS, 1864 }; 1865 1866 module_pci_driver(rio_driver); 1867 1868 /* Read Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst. */ 1869