/*
 * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
 *
 * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
 *
 * Thanks to Essential Communication for providing us with hardware
 * and very comprehensive documentation without which I would not have
 * been able to write this driver. A special thank you to John Gibbon
 * for sorting out the legal issues, with the NDA, allowing the code to
 * be released under the GPL.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
 * stupid bugs in my code.
 *
 * Softnet support and various other patches from Val Henson of
 * ODS/Essential.
 *
 * PCI DMA mapping code partly based on work by Francois Romieu.
 */


#define DEBUG 1
#define RX_DMA_SKBUFF 1
#define PKT_COPY_THRESHOLD 512

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#define rr_if_busy(dev)     netif_queue_stopped(dev)
#define rr_if_running(dev)  netif_running(dev)

#include "rrunner.h"

#define RUN_AT(x) (jiffies + (x))


MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");

static char version[] = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";


static const struct net_device_ops rr_netdev_ops = {
        .ndo_open               = rr_open,
        .ndo_stop               = rr_close,
        .ndo_do_ioctl           = rr_ioctl,
        .ndo_start_xmit         = rr_start_xmit,
        .ndo_change_mtu         = hippi_change_mtu,
        .ndo_set_mac_address    = hippi_mac_addr,
};

/*
 * Implementation notes:
 *
 * The DMA engine only allows for DMA within physical 64KB chunks of
 * memory. The current approach of the driver (and stack) is to use
 * linear blocks of memory for the skbuffs. However, as the data block
 * is always the first part of the skb and skbs are 2^n aligned, we
 * are guaranteed to get the whole block within one 64KB-aligned 64KB
 * chunk.
 *
 * In the long term, relying on being able to allocate 64KB linear
 * chunks of memory is not feasible and the skb handling code and the
 * stack will need to know about I/O vectors or something similar.
 */
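
/*
 * To illustrate the alignment argument above: a 2^n sized, 2^n aligned
 * buffer with n <= 16 starts at an offset within its 64KB chunk that is
 * a multiple of its own size, so it cannot straddle a chunk boundary.
 * A quick sanity check of that property (illustrative only, not used by
 * the driver):
 *
 *      int crosses_64kb(unsigned long addr, unsigned long len)
 *      {
 *              return (addr >> 16) != ((addr + len - 1) >> 16);
 *      }
 *
 * For an 8KB buffer aligned to 8KB, addr & 0x1fff == 0 and the
 * expression above is always false.
 */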

static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        static int version_disp;
        u8 pci_latency;
        struct rr_private *rrpriv;
        void *tmpptr;
        dma_addr_t ring_dma;
        int ret = -ENOMEM;

        dev = alloc_hippi_dev(sizeof(struct rr_private));
        if (!dev)
                goto out3;

        ret = pci_enable_device(pdev);
        if (ret) {
                ret = -ENODEV;
                goto out2;
        }

        rrpriv = netdev_priv(dev);

        SET_NETDEV_DEV(dev, &pdev->dev);

        ret = pci_request_regions(pdev, "rrunner");
        if (ret < 0)
                goto out;

        pci_set_drvdata(pdev, dev);

        rrpriv->pci_dev = pdev;

        spin_lock_init(&rrpriv->lock);

        dev->netdev_ops = &rr_netdev_ops;

        /* display version info if adapter is found */
        if (!version_disp) {
                /* set display flag to TRUE so that */
                /* we only display this string ONCE */
                version_disp = 1;
                printk(version);
        }

        pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
        if (pci_latency <= 0x58){
                pci_latency = 0x58;
                pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
        }

        pci_set_master(pdev);

        printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
               "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
               (unsigned long long)pci_resource_start(pdev, 0),
               pdev->irq, pci_latency);

        /*
         * Remap the MMIO regs into kernel space.
         */
        rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
        if (!rrpriv->regs) {
                printk(KERN_ERR "%s: Unable to map I/O register, "
                       "RoadRunner will be disabled.\n", dev->name);
                ret = -EIO;
                goto out;
        }

        tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
        rrpriv->tx_ring = tmpptr;
        rrpriv->tx_ring_dma = ring_dma;

        if (!tmpptr) {
                ret = -ENOMEM;
                goto out;
        }

        tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
        rrpriv->rx_ring = tmpptr;
        rrpriv->rx_ring_dma = ring_dma;

        if (!tmpptr) {
                ret = -ENOMEM;
                goto out;
        }

        tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
        rrpriv->evt_ring = tmpptr;
        rrpriv->evt_ring_dma = ring_dma;

        if (!tmpptr) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Don't access any register before this point!
         */
#ifdef __BIG_ENDIAN
        writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
               &rrpriv->regs->HostCtrl);
#endif
        /*
         * Need to add a case for little-endian 64-bit hosts here.
         */
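        /*
         * Setting NO_SWAP above (apparently to keep the NIC's byte-lane
         * swapping from mangling host-visible data on big-endian
         * machines) must happen before any other register access; the
         * matching SWAP_DATA/PTR64BIT mode bits are programmed later,
         * in rr_reset().
         */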

        rr_init(dev);

        ret = register_netdev(dev);
        if (ret)
                goto out;
        return 0;

 out:
        if (rrpriv->evt_ring)
                pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
                                    rrpriv->evt_ring_dma);
        if (rrpriv->rx_ring)
                pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
                                    rrpriv->rx_ring_dma);
        if (rrpriv->tx_ring)
                pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
                                    rrpriv->tx_ring_dma);
        if (rrpriv->regs)
                pci_iounmap(pdev, rrpriv->regs);
        if (pdev)
                pci_release_regions(pdev);
 out2:
        free_netdev(dev);
 out3:
        return ret;
}

static void rr_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rr_private *rr = netdev_priv(dev);

        if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
                printk(KERN_ERR "%s: trying to unload running NIC\n",
                       dev->name);
                writel(HALT_NIC, &rr->regs->HostCtrl);
        }

        unregister_netdev(dev);
        pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
                            rr->evt_ring_dma);
        pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
                            rr->rx_ring_dma);
        pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
                            rr->tx_ring_dma);
        pci_iounmap(pdev, rr->regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        free_netdev(dev);
}


/*
 * Commands are considered to be slow, thus there is no reason to
 * inline this.
 */
static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
{
        struct rr_regs __iomem *regs;
        u32 idx;

        regs = rrpriv->regs;
        /*
         * This is temporary - it will go away in the final version.
         * We probably also want to make this function inline.
         */
        if (readl(&regs->HostCtrl) & NIC_HALTED){
                printk("issuing command for halted NIC, code 0x%x, "
                       "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
                if (readl(&regs->Mode) & FATAL_ERR)
                        printk("error codes Fail1 %02x, Fail2 %02x\n",
                               readl(&regs->Fail1), readl(&regs->Fail2));
        }

        idx = rrpriv->info->cmd_ctrl.pi;

        writel(*(u32*)(cmd), &regs->CmdRing[idx]);
        wmb();

        idx = (idx - 1) % CMD_RING_ENTRIES;
        rrpriv->info->cmd_ctrl.pi = idx;
        wmb();

        if (readl(&regs->Mode) & FATAL_ERR)
                printk("error code %02x\n", readl(&regs->Fail1));
}
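
/*
 * Note that the command ring producer index counts *down*: rr_issue_cmd()
 * writes at the current pi and then sets pi = (idx - 1) % CMD_RING_ENTRIES,
 * matching the initial value of 15 programmed in rr_init1(). Illustrative
 * wrap, assuming CMD_RING_ENTRIES is 16:
 *
 *      pi: 15 -> 14 -> ... -> 1 -> 0 -> 15 -> ...
 */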

/*
 * Reset the board in a sensible manner. The NIC is already halted
 * when we get here and a spin-lock is held.
 */
static int rr_reset(struct net_device *dev)
{
        struct rr_private *rrpriv;
        struct rr_regs __iomem *regs;
        u32 start_pc;
        int i;

        rrpriv = netdev_priv(dev);
        regs = rrpriv->regs;

        rr_load_firmware(dev);

        writel(0x01000000, &regs->TX_state);
        writel(0xff800000, &regs->RX_state);
        writel(0, &regs->AssistState);
        writel(CLEAR_INTA, &regs->LocalCtrl);
        writel(0x01, &regs->BrkPt);
        writel(0, &regs->Timer);
        writel(0, &regs->TimerRef);
        writel(RESET_DMA, &regs->DmaReadState);
        writel(RESET_DMA, &regs->DmaWriteState);
        writel(0, &regs->DmaWriteHostHi);
        writel(0, &regs->DmaWriteHostLo);
        writel(0, &regs->DmaReadHostHi);
        writel(0, &regs->DmaReadHostLo);
        writel(0, &regs->DmaReadLen);
        writel(0, &regs->DmaWriteLen);
        writel(0, &regs->DmaWriteLcl);
        writel(0, &regs->DmaWriteIPchecksum);
        writel(0, &regs->DmaReadLcl);
        writel(0, &regs->DmaReadIPchecksum);
        writel(0, &regs->PciState);
#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
        writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
#elif (BITS_PER_LONG == 64)
        writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
#else
        writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
#endif

#if 0
        /*
         * Don't worry, this is just black magic.
         */
        writel(0xdf000, &regs->RxBase);
        writel(0xdf000, &regs->RxPrd);
        writel(0xdf000, &regs->RxCon);
        writel(0xce000, &regs->TxBase);
        writel(0xce000, &regs->TxPrd);
        writel(0xce000, &regs->TxCon);
        writel(0, &regs->RxIndPro);
        writel(0, &regs->RxIndCon);
        writel(0, &regs->RxIndRef);
        writel(0, &regs->TxIndPro);
        writel(0, &regs->TxIndCon);
        writel(0, &regs->TxIndRef);
        writel(0xcc000, &regs->pad10[0]);
        writel(0, &regs->DrCmndPro);
        writel(0, &regs->DrCmndCon);
        writel(0, &regs->DwCmndPro);
        writel(0, &regs->DwCmndCon);
        writel(0, &regs->DwCmndRef);
        writel(0, &regs->DrDataPro);
        writel(0, &regs->DrDataCon);
        writel(0, &regs->DrDataRef);
        writel(0, &regs->DwDataPro);
        writel(0, &regs->DwDataCon);
        writel(0, &regs->DwDataRef);
#endif

        writel(0xffffffff, &regs->MbEvent);
        writel(0, &regs->Event);

        writel(0, &regs->TxPi);
        writel(0, &regs->IpRxPi);

        writel(0, &regs->EvtCon);
        writel(0, &regs->EvtPrd);

        rrpriv->info->evt_ctrl.pi = 0;

        for (i = 0; i < CMD_RING_ENTRIES; i++)
                writel(0, &regs->CmdRing[i]);

        /*
         * Why 32? Is this not cache-line-size dependent?
         */
        writel(RBURST_64|WBURST_64, &regs->PciState);
        wmb();

        start_pc = rr_read_eeprom_word(rrpriv,
                        offsetof(struct eeprom, rncd_info.FwStart));

#if (DEBUG > 1)
        printk("%s: Executing firmware at address 0x%06x\n",
               dev->name, start_pc);
#endif

        writel(start_pc + 0x800, &regs->Pc);
        wmb();
        udelay(5);

        writel(start_pc, &regs->Pc);
        wmb();

        return 0;
}


/*
 * Read a string from the EEPROM.
 */
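/*
 * EEPROM bytes are reached through the window registers: the byte at
 * EEPROM offset n sits at local address EEPROM_BASE + (n << 3) and comes
 * back in the top byte of WinData. A minimal sketch of reading a single
 * byte, assuming the NIC has already been halted as below:
 *
 *      writel(EEPROM_BASE + (offset << 3), &regs->WinBase);
 *      mb();
 *      byte = (readl(&regs->WinData) >> 24) & 0xff;
 */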
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
                                   unsigned long offset,
                                   unsigned char *buf,
                                   unsigned long length)
{
        struct rr_regs __iomem *regs = rrpriv->regs;
        u32 misc, io, host, i;

        io = readl(&regs->ExtIo);
        writel(0, &regs->ExtIo);
        misc = readl(&regs->LocalCtrl);
        writel(0, &regs->LocalCtrl);
        host = readl(&regs->HostCtrl);
        writel(host | HALT_NIC, &regs->HostCtrl);
        mb();

        for (i = 0; i < length; i++){
                writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
                mb();
                buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
                mb();
        }

        writel(host, &regs->HostCtrl);
        writel(misc, &regs->LocalCtrl);
        writel(io, &regs->ExtIo);
        mb();
        return i;
}


/*
 * Shortcut to read one word (4 bytes) out of the EEPROM and convert
 * it to our CPU byte-order.
 */
static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
                               size_t offset)
{
        __be32 word;

        if ((rr_read_eeprom(rrpriv, offset,
                            (unsigned char *)&word, 4) == 4))
                return be32_to_cpu(word);
        return 0;
}


/*
 * Write a string to the EEPROM.
 *
 * This is only called when the firmware is not running.
 */
static unsigned int write_eeprom(struct rr_private *rrpriv,
                                 unsigned long offset,
                                 unsigned char *buf,
                                 unsigned long length)
{
        struct rr_regs __iomem *regs = rrpriv->regs;
        u32 misc, io, data, i, j, ready, error = 0;

        io = readl(&regs->ExtIo);
        writel(0, &regs->ExtIo);
        misc = readl(&regs->LocalCtrl);
        writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
        mb();

        for (i = 0; i < length; i++){
                writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
                mb();
                data = buf[i] << 24;
                /*
                 * Only try to write the data if it is not the same
                 * value already.
                 */
                if ((readl(&regs->WinData) & 0xff000000) != data){
                        writel(data, &regs->WinData);
                        ready = 0;
                        j = 0;
                        mb();
                        while(!ready){
                                udelay(20);
                                if ((readl(&regs->WinData) & 0xff000000) ==
                                    data)
                                        ready = 1;
                                mb();
                                if (j++ > 5000){
                                        printk("data mismatch: %08x, "
                                               "WinData %08x\n", data,
                                               readl(&regs->WinData));
                                        ready = 1;
                                        error = 1;
                                }
                        }
                }
        }

        writel(misc, &regs->LocalCtrl);
        writel(io, &regs->ExtIo);
        mb();

        return error;
}


static int rr_init(struct net_device *dev)
{
        struct rr_private *rrpriv;
        struct rr_regs __iomem *regs;
        u32 sram_size, rev;

        rrpriv = netdev_priv(dev);
        regs = rrpriv->regs;

        rev = readl(&regs->FwRev);
        rrpriv->fw_rev = rev;
        if (rev > 0x00020024)
                printk("  Firmware revision: %i.%i.%i\n", (rev >> 16),
                       ((rev >> 8) & 0xff), (rev & 0xff));
        else if (rev >= 0x00020000) {
                printk("  Firmware revision: %i.%i.%i (2.0.37 or "
                       "later is recommended)\n", (rev >> 16),
                       ((rev >> 8) & 0xff), (rev & 0xff));
        } else {
                printk("  Firmware revision too old: %i.%i.%i, please "
                       "upgrade to 2.0.37 or later.\n",
                       (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
        }

#if (DEBUG > 2)
        printk("  Maximum receive rings %i\n", readl(&regs->MaxRxRng));
#endif

        /*
         * Read the hardware address from the eeprom. The HW address
         * is not really necessary for HIPPI but awfully convenient.
         * The pointer arithmetic to put it in dev_addr is ugly, but
         * Donald Becker does it this way for the GigE version of this
         * card and it's shorter and more portable than any
         * other method I've seen.  -VAL
         */
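        /*
         * The 6-byte ULA is fetched as one 16-bit word (bytes 0-1) and
         * one 32-bit word (bytes 2-5); htons()/htonl() put the EEPROM's
         * big-endian value back into wire order regardless of host
         * endianness.
         */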

        *(__be16 *)(dev->dev_addr) =
                htons(rr_read_eeprom_word(rrpriv,
                        offsetof(struct eeprom, manf.BoardULA)));
        *(__be32 *)(dev->dev_addr+2) =
                htonl(rr_read_eeprom_word(rrpriv,
                        offsetof(struct eeprom, manf.BoardULA[4])));

        printk("  MAC: %pM\n", dev->dev_addr);

        sram_size = rr_read_eeprom_word(rrpriv, 8);
        printk("  SRAM size 0x%06x\n", sram_size);

        return 0;
}


static int rr_init1(struct net_device *dev)
{
        struct rr_private *rrpriv;
        struct rr_regs __iomem *regs;
        unsigned long myjif, flags;
        struct cmd cmd;
        u32 hostctrl;
        int ecode = 0;
        short i;

        rrpriv = netdev_priv(dev);
        regs = rrpriv->regs;

        spin_lock_irqsave(&rrpriv->lock, flags);

        hostctrl = readl(&regs->HostCtrl);
        writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
        wmb();

        if (hostctrl & PARITY_ERR){
                printk("%s: Parity error halting NIC - this is serious!\n",
                       dev->name);
                spin_unlock_irqrestore(&rrpriv->lock, flags);
                ecode = -EFAULT;
                goto error;
        }

        set_rxaddr(regs, rrpriv->rx_ctrl_dma);
        set_infoaddr(regs, rrpriv->info_dma);

        rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
        rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
        rrpriv->info->evt_ctrl.mode = 0;
        rrpriv->info->evt_ctrl.pi = 0;
        set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);

        rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
        rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
        rrpriv->info->cmd_ctrl.mode = 0;
        rrpriv->info->cmd_ctrl.pi = 15;

        for (i = 0; i < CMD_RING_ENTRIES; i++) {
                writel(0, &regs->CmdRing[i]);
        }

        for (i = 0; i < TX_RING_ENTRIES; i++) {
                rrpriv->tx_ring[i].size = 0;
                set_rraddr(&rrpriv->tx_ring[i].addr, 0);
                rrpriv->tx_skbuff[i] = NULL;
        }
        rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
        rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
        rrpriv->info->tx_ctrl.mode = 0;
        rrpriv->info->tx_ctrl.pi = 0;
        set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);

        /*
         * Set dirty_tx before we start receiving interrupts, otherwise
         * the interrupt handler might think it is supposed to process
         * tx ints before we are up and running, which may cause a null
         * pointer access in the int handler.
         */
        rrpriv->tx_full = 0;
        rrpriv->cur_rx = 0;
        rrpriv->dirty_rx = rrpriv->dirty_tx = 0;

        rr_reset(dev);

        /* Tuning values */
        writel(0x5000, &regs->ConRetry);
        writel(0x100, &regs->ConRetryTmr);
        writel(0x500000, &regs->ConTmout);
        writel(0x60, &regs->IntrTmr);
        writel(0x500000, &regs->TxDataMvTimeout);
        writel(0x200000, &regs->RxDataMvTimeout);
        writel(0x80, &regs->WriteDmaThresh);
        writel(0x80, &regs->ReadDmaThresh);

        rrpriv->fw_running = 0;
        wmb();

        hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
        writel(hostctrl, &regs->HostCtrl);
        wmb();

        spin_unlock_irqrestore(&rrpriv->lock, flags);

        for (i = 0; i < RX_RING_ENTRIES; i++) {
                struct sk_buff *skb;
                dma_addr_t addr;

                rrpriv->rx_ring[i].mode = 0;
                skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
                if (!skb) {
                        printk(KERN_WARNING "%s: Unable to allocate memory "
                               "for receive ring - halting NIC\n", dev->name);
                        ecode = -ENOMEM;
                        goto error;
                }
                rrpriv->rx_skbuff[i] = skb;
                addr = pci_map_single(rrpriv->pci_dev, skb->data,
                                      dev->mtu + HIPPI_HLEN,
                                      PCI_DMA_FROMDEVICE);
                /*
                 * Sanity test to see if we conflict with the DMA
                 * limitations of the Roadrunner.
                 */
                if ((((unsigned long)skb->data) & 0xfff) > ~65320)
                        printk("skb alloc error\n");

                set_rraddr(&rrpriv->rx_ring[i].addr, addr);
                rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
        }

        rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
        rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
        rrpriv->rx_ctrl[4].mode = 8;
        rrpriv->rx_ctrl[4].pi = 0;
        wmb();
        set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);

        udelay(1000);

        /*
         * Now start the firmware.
         */
        cmd.code = C_START_FW;
        cmd.ring = 0;
        cmd.index = 0;

        rr_issue_cmd(rrpriv, &cmd);

        /*
         * Give the firmware time to chew on the `get running' command.
         */
        myjif = jiffies + 5 * HZ;
        while (time_before(jiffies, myjif) && !rrpriv->fw_running)
                cpu_relax();

        netif_start_queue(dev);

        return ecode;

 error:
        /*
         * We might have gotten here because we are out of memory, so
         * make sure we release everything we allocated before failing.
         */
        for (i = 0; i < RX_RING_ENTRIES; i++) {
                struct sk_buff *skb = rrpriv->rx_skbuff[i];

                if (skb) {
                        pci_unmap_single(rrpriv->pci_dev,
                                         rrpriv->rx_ring[i].addr.addrlo,
                                         dev->mtu + HIPPI_HLEN,
                                         PCI_DMA_FROMDEVICE);
                        rrpriv->rx_ring[i].size = 0;
                        set_rraddr(&rrpriv->rx_ring[i].addr, 0);
                        dev_kfree_skb(skb);
                        rrpriv->rx_skbuff[i] = NULL;
                }
        }
        return ecode;
}


/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
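/*
 * prodidx is the firmware's event producer index (read from the EvtPrd
 * register) and eidx is the host's consumer index; both wrap modulo
 * EVT_RING_ENTRIES. The loop below simply walks eidx forward until it
 * catches up with prodidx.
 */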
static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
{
        struct rr_private *rrpriv;
        struct rr_regs __iomem *regs;
        u32 tmp;

        rrpriv = netdev_priv(dev);
        regs = rrpriv->regs;

        while (prodidx != eidx){
                switch (rrpriv->evt_ring[eidx].code){
                case E_NIC_UP:
                        tmp = readl(&regs->FwRev);
                        printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
                               "up and running\n", dev->name,
                               (tmp >> 16), ((tmp >> 8) & 0xff),
                               (tmp & 0xff));
                        rrpriv->fw_running = 1;
                        writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
                        wmb();
                        break;
                case E_LINK_ON:
                        printk(KERN_INFO "%s: Optical link ON\n", dev->name);
                        break;
                case E_LINK_OFF:
                        printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
                        break;
                case E_RX_IDLE:
                        printk(KERN_WARNING "%s: RX data not moving\n",
                               dev->name);
                        goto drop;
                case E_WATCHDOG:
                        printk(KERN_INFO "%s: The watchdog is here to see "
                               "us\n", dev->name);
                        break;
                case E_INTERN_ERR:
                        printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
                               dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                case E_HOST_ERR:
                        printk(KERN_ERR "%s: Host software error\n",
                               dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                /*
                 * TX events.
                 */
                case E_CON_REJ:
                        printk(KERN_WARNING "%s: Connection rejected\n",
                               dev->name);
                        dev->stats.tx_aborted_errors++;
                        break;
                case E_CON_TMOUT:
                        printk(KERN_WARNING "%s: Connection timeout\n",
                               dev->name);
                        break;
                case E_DISC_ERR:
                        printk(KERN_WARNING "%s: HIPPI disconnect error\n",
                               dev->name);
                        dev->stats.tx_aborted_errors++;
                        break;
                case E_INT_PRTY:
                        printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
                               dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                case E_TX_IDLE:
                        printk(KERN_WARNING "%s: Transmitter idle\n",
                               dev->name);
                        break;
                case E_TX_LINK_DROP:
                        printk(KERN_WARNING "%s: Link lost during transmit\n",
                               dev->name);
                        dev->stats.tx_aborted_errors++;
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                case E_TX_INV_RNG:
                        printk(KERN_ERR "%s: Invalid send ring block\n",
                               dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                case E_TX_INV_BUF:
                        printk(KERN_ERR "%s: Invalid send buffer address\n",
                               dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                case E_TX_INV_DSC:
                        printk(KERN_ERR "%s: Invalid descriptor address\n",
                               dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                /*
                 * RX events.
                 */
                case E_RX_RNG_OUT:
                        printk(KERN_INFO "%s: Receive ring full\n",
                               dev->name);
                        break;

                case E_RX_PAR_ERR:
                        printk(KERN_WARNING "%s: Receive parity error\n",
                               dev->name);
                        goto drop;
                case E_RX_LLRC_ERR:
                        printk(KERN_WARNING "%s: Receive LLRC error\n",
                               dev->name);
                        goto drop;
                case E_PKT_LN_ERR:
                        printk(KERN_WARNING "%s: Receive packet length "
                               "error\n", dev->name);
                        goto drop;
                case E_DTA_CKSM_ERR:
                        printk(KERN_WARNING "%s: Data checksum error\n",
                               dev->name);
                        goto drop;
                case E_SHT_BST:
                        printk(KERN_WARNING "%s: Unexpected short burst "
                               "error\n", dev->name);
                        goto drop;
                case E_STATE_ERR:
                        printk(KERN_WARNING "%s: Recv. state transition"
                               " error\n", dev->name);
                        goto drop;
                case E_UNEXP_DATA:
                        printk(KERN_WARNING "%s: Unexpected data error\n",
                               dev->name);
                        goto drop;
                case E_LST_LNK_ERR:
                        printk(KERN_WARNING "%s: Link lost error\n",
                               dev->name);
                        goto drop;
                case E_FRM_ERR:
                        printk(KERN_WARNING "%s: Framing error\n",
                               dev->name);
                        goto drop;
                case E_FLG_SYN_ERR:
                        printk(KERN_WARNING "%s: Flag sync. lost during "
                               "packet\n", dev->name);
                        goto drop;
                case E_RX_INV_BUF:
                        printk(KERN_ERR "%s: Invalid receive buffer "
                               "address\n", dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                case E_RX_INV_DSC:
                        printk(KERN_ERR "%s: Invalid receive descriptor "
                               "address\n", dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                case E_RNG_BLK:
                        printk(KERN_ERR "%s: Invalid ring block\n",
                               dev->name);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        wmb();
                        break;
                drop:
                        /* Label packet to be dropped.
                         * Actual dropping occurs in rx
                         * handling.
                         *
                         * The index of packet we get to drop is
                         * the index of the packet following
                         * the bad packet.  -kbf
                         */
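                        /*
                         * Worked example, assuming RX_RING_ENTRIES is 16:
                         * if the event carries index 5, the bad packet is
                         * the one in ring entry (5 + 15) % 16 == 4.
                         */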
                        {
                                u16 index = rrpriv->evt_ring[eidx].index;
                                index = (index + (RX_RING_ENTRIES - 1)) %
                                        RX_RING_ENTRIES;
                                rrpriv->rx_ring[index].mode |=
                                        (PACKET_BAD | PACKET_END);
                        }
                        break;
                default:
                        printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
                               dev->name, rrpriv->evt_ring[eidx].code);
                }
                eidx = (eidx + 1) % EVT_RING_ENTRIES;
        }

        rrpriv->info->evt_ctrl.pi = eidx;
        wmb();
        return eidx;
}


static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
{
        struct rr_private *rrpriv = netdev_priv(dev);
        struct rr_regs __iomem *regs = rrpriv->regs;

        do {
                struct rx_desc *desc;
                u32 pkt_len;

                desc = &(rrpriv->rx_ring[index]);
                pkt_len = desc->size;
#if (DEBUG > 2)
                printk("index %i, rxlimit %i\n", index, rxlimit);
                printk("len %x, mode %x\n", pkt_len, desc->mode);
#endif
                if ((rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
                        dev->stats.rx_dropped++;
                        goto defer;
                }

                if (pkt_len > 0){
                        struct sk_buff *skb, *rx_skb;

                        rx_skb = rrpriv->rx_skbuff[index];

                        if (pkt_len < PKT_COPY_THRESHOLD) {
                                skb = alloc_skb(pkt_len, GFP_ATOMIC);
                                if (skb == NULL){
                                        printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n",
                                               dev->name, pkt_len);
                                        dev->stats.rx_dropped++;
                                        goto defer;
                                } else {
                                        pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
                                                                    desc->addr.addrlo,
                                                                    pkt_len,
                                                                    PCI_DMA_FROMDEVICE);

                                        memcpy(skb_put(skb, pkt_len),
                                               rx_skb->data, pkt_len);

                                        pci_dma_sync_single_for_device(rrpriv->pci_dev,
                                                                       desc->addr.addrlo,
                                                                       pkt_len,
                                                                       PCI_DMA_FROMDEVICE);
                                }
                        } else {
                                struct sk_buff *newskb;

                                newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
                                                   GFP_ATOMIC);
                                if (newskb){
                                        dma_addr_t addr;

                                        pci_unmap_single(rrpriv->pci_dev,
                                                         desc->addr.addrlo,
                                                         dev->mtu + HIPPI_HLEN,
                                                         PCI_DMA_FROMDEVICE);
                                        skb = rx_skb;
                                        skb_put(skb, pkt_len);
                                        rrpriv->rx_skbuff[index] = newskb;
                                        addr = pci_map_single(rrpriv->pci_dev,
                                                              newskb->data,
                                                              dev->mtu + HIPPI_HLEN,
                                                              PCI_DMA_FROMDEVICE);
                                        set_rraddr(&desc->addr, addr);
                                } else {
                                        printk("%s: Out of memory, deferring "
                                               "packet\n", dev->name);
                                        dev->stats.rx_dropped++;
                                        goto defer;
                                }
                        }
                        skb->protocol = hippi_type_trans(skb, dev);

                        netif_rx(skb);          /* send it up */

                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pkt_len;
                }
 defer:
                desc->mode = 0;
                desc->size = dev->mtu + HIPPI_HLEN;

                if ((index & 7) == 7)
                        writel(index, &regs->IpRxPi);

                index = (index + 1) % RX_RING_ENTRIES;
        } while (index != rxlimit);

        rrpriv->cur_rx = index;
        wmb();
}


static irqreturn_t rr_interrupt(int irq, void *dev_id)
{
        struct rr_private *rrpriv;
        struct rr_regs __iomem *regs;
        struct net_device *dev = (struct net_device *)dev_id;
        u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;

        rrpriv = netdev_priv(dev);
        regs = rrpriv->regs;

        if (!(readl(&regs->HostCtrl) & RR_INT))
                return IRQ_NONE;

        spin_lock(&rrpriv->lock);

        prodidx = readl(&regs->EvtPrd);
        txcsmr = (prodidx >> 8) & 0xff;
        rxlimit = (prodidx >> 16) & 0xff;
        prodidx &= 0xff;

#if (DEBUG > 2)
        printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
               prodidx, rrpriv->info->evt_ctrl.pi);
#endif
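        /*
         * EvtPrd packs three 8-bit indices, as decoded above: bits 0-7
         * hold the event producer, bits 8-15 the TX consumer and bits
         * 16-23 the RX limit. The same packing is written back to EvtCon
         * at the end of this handler.
         */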
        /*
         * Order here is important.  We must handle events
         * before doing anything else in order to catch
         * such things as LLRC errors, etc.  -kbf
         */

        eidx = rrpriv->info->evt_ctrl.pi;
        if (prodidx != eidx)
                eidx = rr_handle_event(dev, prodidx, eidx);

        rxindex = rrpriv->cur_rx;
        if (rxindex != rxlimit)
                rx_int(dev, rxlimit, rxindex);

        txcon = rrpriv->dirty_tx;
        if (txcsmr != txcon) {
                do {
                        /* Due to occasional firmware TX producer/consumer
                         * out-of-sync errors, we need to check that the
                         * entry in the ring is valid.  -kbf
                         */
                        if (rrpriv->tx_skbuff[txcon]){
                                struct tx_desc *desc;
                                struct sk_buff *skb;

                                desc = &(rrpriv->tx_ring[txcon]);
                                skb = rrpriv->tx_skbuff[txcon];

                                dev->stats.tx_packets++;
                                dev->stats.tx_bytes += skb->len;

                                pci_unmap_single(rrpriv->pci_dev,
                                                 desc->addr.addrlo, skb->len,
                                                 PCI_DMA_TODEVICE);
                                dev_kfree_skb_irq(skb);

                                rrpriv->tx_skbuff[txcon] = NULL;
                                desc->size = 0;
                                set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
                                desc->mode = 0;
                        }
                        txcon = (txcon + 1) % TX_RING_ENTRIES;
                } while (txcsmr != txcon);
                wmb();

                rrpriv->dirty_tx = txcon;
                if (rrpriv->tx_full && rr_if_busy(dev) &&
                    (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
                     != rrpriv->dirty_tx)){
                        rrpriv->tx_full = 0;
                        netif_wake_queue(dev);
                }
        }

        eidx |= ((txcsmr << 8) | (rxlimit << 16));
        writel(eidx, &regs->EvtCon);
        wmb();

        spin_unlock(&rrpriv->lock);
        return IRQ_HANDLED;
}

static inline void rr_raz_tx(struct rr_private *rrpriv,
                             struct net_device *dev)
{
        int i;

        for (i = 0; i < TX_RING_ENTRIES; i++) {
                struct sk_buff *skb = rrpriv->tx_skbuff[i];

                if (skb) {
                        struct tx_desc *desc = &(rrpriv->tx_ring[i]);

                        pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
                                         skb->len, PCI_DMA_TODEVICE);
                        desc->size = 0;
                        set_rraddr(&desc->addr, 0);
                        dev_kfree_skb(skb);
                        rrpriv->tx_skbuff[i] = NULL;
                }
        }
}


static inline void rr_raz_rx(struct rr_private *rrpriv,
                             struct net_device *dev)
{
        int i;

        for (i = 0; i < RX_RING_ENTRIES; i++) {
                struct sk_buff *skb = rrpriv->rx_skbuff[i];

                if (skb) {
                        struct rx_desc *desc = &(rrpriv->rx_ring[i]);

                        pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
                                         dev->mtu + HIPPI_HLEN,
                                         PCI_DMA_FROMDEVICE);
                        desc->size = 0;
                        set_rraddr(&desc->addr, 0);
                        dev_kfree_skb(skb);
                        rrpriv->rx_skbuff[i] = NULL;
                }
        }
}

static void rr_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct rr_private *rrpriv = netdev_priv(dev);
        struct rr_regs __iomem *regs = rrpriv->regs;
        unsigned long flags;

        if (readl(&regs->HostCtrl) & NIC_HALTED){
                printk("%s: Restarting nic\n", dev->name);
                memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
                memset(rrpriv->info, 0, sizeof(struct rr_info));
                wmb();

                rr_raz_tx(rrpriv, dev);
                rr_raz_rx(rrpriv, dev);

                if (rr_init1(dev)) {
                        spin_lock_irqsave(&rrpriv->lock, flags);
                        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
                               &regs->HostCtrl);
                        spin_unlock_irqrestore(&rrpriv->lock, flags);
                }
        }
        rrpriv->timer.expires = RUN_AT(5*HZ);
        add_timer(&rrpriv->timer);
}
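
/*
 * rr_timer() above acts as a crude watchdog: every 5 seconds it checks
 * whether the NIC has halted itself and, if so, tears down the rings and
 * reruns rr_init1() to restart it.
 */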

static int rr_open(struct net_device *dev)
{
        struct rr_private *rrpriv = netdev_priv(dev);
        struct pci_dev *pdev = rrpriv->pci_dev;
        struct rr_regs __iomem *regs;
        int ecode = 0;
        unsigned long flags;
        dma_addr_t dma_addr;

        regs = rrpriv->regs;

        if (rrpriv->fw_rev < 0x00020000) {
                printk(KERN_WARNING "%s: trying to configure device with "
                       "obsolete firmware\n", dev->name);
                ecode = -EBUSY;
                goto error;
        }

        rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
                                               256 * sizeof(struct ring_ctrl),
                                               &dma_addr);
        if (!rrpriv->rx_ctrl) {
                ecode = -ENOMEM;
                goto error;
        }
        rrpriv->rx_ctrl_dma = dma_addr;
        memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl));

        rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
                                            &dma_addr);
        if (!rrpriv->info) {
                ecode = -ENOMEM;
                goto error;
        }
        rrpriv->info_dma = dma_addr;
        memset(rrpriv->info, 0, sizeof(struct rr_info));
        wmb();

        spin_lock_irqsave(&rrpriv->lock, flags);
        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
        readl(&regs->HostCtrl);
        spin_unlock_irqrestore(&rrpriv->lock, flags);

        if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
                printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
                       dev->name, pdev->irq);
                ecode = -EAGAIN;
                goto error;
        }

        if ((ecode = rr_init1(dev)))
                goto error;

        /* Set the timer to check for link beat and perhaps switch
           to an alternate media type. */
        init_timer(&rrpriv->timer);
        rrpriv->timer.expires = RUN_AT(5*HZ);           /* 5 sec. watchdog */
        rrpriv->timer.data = (unsigned long)dev;
        rrpriv->timer.function = rr_timer;              /* timer handler */
        add_timer(&rrpriv->timer);

        netif_start_queue(dev);

        return ecode;

 error:
        spin_lock_irqsave(&rrpriv->lock, flags);
        writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
        spin_unlock_irqrestore(&rrpriv->lock, flags);

        if (rrpriv->info) {
                pci_free_consistent(pdev, sizeof(struct rr_info),
                                    rrpriv->info, rrpriv->info_dma);
                rrpriv->info = NULL;
        }
        if (rrpriv->rx_ctrl) {
                pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
                                    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
                rrpriv->rx_ctrl = NULL;
        }

        netif_stop_queue(dev);

        return ecode;
}


static void rr_dump(struct net_device *dev)
{
        struct rr_private *rrpriv;
        struct rr_regs __iomem *regs;
        u32 index, cons;
        short i;
        int len;

        rrpriv = netdev_priv(dev);
        regs = rrpriv->regs;

        printk("%s: dumping NIC TX rings\n", dev->name);

        printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
               readl(&regs->RxPrd), readl(&regs->TxPrd),
               readl(&regs->EvtPrd), readl(&regs->TxPi),
               rrpriv->info->tx_ctrl.pi);

        printk("Error code 0x%x\n", readl(&regs->Fail1));

        index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
        cons = rrpriv->dirty_tx;
        printk("TX ring index %i, TX consumer %i\n",
               index, cons);

        if (rrpriv->tx_skbuff[index]){
                len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
                printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n",
                       index, len, rrpriv->tx_ring[index].size);
                for (i = 0; i < len; i++){
                        if (!(i & 7))
                                printk("\n");
                        printk("%02x ", (unsigned char)rrpriv->tx_skbuff[index]->data[i]);
                }
                printk("\n");
        }

        if (rrpriv->tx_skbuff[cons]){
                len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
                printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n",
                       cons, len, rrpriv->tx_skbuff[cons]->len);
                printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n",
                       rrpriv->tx_ring[cons].mode,
                       rrpriv->tx_ring[cons].size,
                       (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
                       (unsigned long)rrpriv->tx_skbuff[cons]->data,
                       (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
                for (i = 0; i < len; i++){
                        if (!(i & 7))
                                printk("\n");
                        printk("%02x ", (unsigned char)rrpriv->tx_skbuff[cons]->data[i]);
                }
                printk("\n");
        }

        printk("dumping TX ring info:\n");
        for (i = 0; i < TX_RING_ENTRIES; i++)
                printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
                       rrpriv->tx_ring[i].mode,
                       rrpriv->tx_ring[i].size,
                       (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);

}


static int rr_close(struct net_device *dev)
{
        struct rr_private *rrpriv = netdev_priv(dev);
        struct rr_regs __iomem *regs = rrpriv->regs;
        struct pci_dev *pdev = rrpriv->pci_dev;
        unsigned long flags;
        u32 tmp;
        short i;

        netif_stop_queue(dev);


        /*
         * Lock to make sure we are not cleaning up while another CPU
         * is handling interrupts.
         */
        spin_lock_irqsave(&rrpriv->lock, flags);

        tmp = readl(&regs->HostCtrl);
        if (tmp & NIC_HALTED){
                printk("%s: NIC already halted\n", dev->name);
                rr_dump(dev);
        } else {
                tmp |= HALT_NIC | RR_CLEAR_INT;
                writel(tmp, &regs->HostCtrl);
                readl(&regs->HostCtrl);
        }

        rrpriv->fw_running = 0;

        del_timer_sync(&rrpriv->timer);

        writel(0, &regs->TxPi);
        writel(0, &regs->IpRxPi);

        writel(0, &regs->EvtCon);
        writel(0, &regs->EvtPrd);

        for (i = 0; i < CMD_RING_ENTRIES; i++)
                writel(0, &regs->CmdRing[i]);

        rrpriv->info->tx_ctrl.entries = 0;
        rrpriv->info->cmd_ctrl.pi = 0;
        rrpriv->info->evt_ctrl.pi = 0;
        rrpriv->rx_ctrl[4].entries = 0;

        rr_raz_tx(rrpriv, dev);
        rr_raz_rx(rrpriv, dev);

        pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
                            rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
        rrpriv->rx_ctrl = NULL;

        pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
                            rrpriv->info_dma);
        rrpriv->info = NULL;

        free_irq(pdev->irq, dev);
        spin_unlock_irqrestore(&rrpriv->lock, flags);

        return 0;
}


static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
                                 struct net_device *dev)
{
        struct rr_private *rrpriv = netdev_priv(dev);
        struct rr_regs __iomem *regs = rrpriv->regs;
        struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
        struct ring_ctrl *txctrl;
        unsigned long flags;
        u32 index, len = skb->len;
        u32 *ifield;
        struct sk_buff *new_skb;

        if (readl(&regs->Mode) & FATAL_ERR)
                printk("error codes Fail1 %02x, Fail2 %02x\n",
                       readl(&regs->Fail1), readl(&regs->Fail2));

        /*
         * We probably need to deal with tbusy here to prevent overruns.
         */
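        /*
         * Every outgoing packet gets an 8-byte prefix pushed in front of
         * the payload: a zero word followed by the HIPPI I-field taken
         * from the skb's control block. That is why 8 bytes of headroom
         * are required here, and why the DMA length below is len + 8.
         */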
        if (skb_headroom(skb) < 8){
                printk("incoming skb too small - reallocating\n");
                if (!(new_skb = dev_alloc_skb(len + 8))) {
                        dev_kfree_skb(skb);
                        netif_wake_queue(dev);
                        return NETDEV_TX_OK;
                }
                skb_reserve(new_skb, 8);
                skb_put(new_skb, len);
                skb_copy_from_linear_data(skb, new_skb->data, len);
                dev_kfree_skb(skb);
                skb = new_skb;
        }

        ifield = (u32 *)skb_push(skb, 8);

        ifield[0] = 0;
        ifield[1] = hcb->ifield;

        /*
         * We don't need the lock before we are actually going to start
         * fiddling with the control blocks.
         */
        spin_lock_irqsave(&rrpriv->lock, flags);

        txctrl = &rrpriv->info->tx_ctrl;

        index = txctrl->pi;

        rrpriv->tx_skbuff[index] = skb;
        set_rraddr(&rrpriv->tx_ring[index].addr,
                   pci_map_single(rrpriv->pci_dev, skb->data, len + 8,
                                  PCI_DMA_TODEVICE));
        rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
        rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
        txctrl->pi = (index + 1) % TX_RING_ENTRIES;
        wmb();
        writel(txctrl->pi, &regs->TxPi);

        if (txctrl->pi == rrpriv->dirty_tx){
                rrpriv->tx_full = 1;
                netif_stop_queue(dev);
        }

        spin_unlock_irqrestore(&rrpriv->lock, flags);

        return NETDEV_TX_OK;
}


/*
 * Read the firmware out of the EEPROM and put it into the SRAM
 * (or from user space - later).
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock held.
 */
static int rr_load_firmware(struct net_device *dev)
{
        struct rr_private *rrpriv;
        struct rr_regs __iomem *regs;
        size_t eptr, segptr;
        int i, j;
        u32 localctrl, sptr, len, tmp;
        u32 p2len, p2size, nr_seg, revision, io, sram_size;

        rrpriv = netdev_priv(dev);
        regs = rrpriv->regs;

        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
                printk("%s: Trying to load firmware to a running NIC.\n",
                       dev->name);
                return -EBUSY;
        }

        localctrl = readl(&regs->LocalCtrl);
        writel(0, &regs->LocalCtrl);

        writel(0, &regs->EvtPrd);
        writel(0, &regs->RxPrd);
        writel(0, &regs->TxPrd);

        /*
         * First wipe the entire SRAM, otherwise we might run into all
         * kinds of trouble ... sigh, this took almost all afternoon
         * to track down ;-(
         */
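        /*
         * Layout of the firmware image in EEPROM, as interpreted by the
         * segment loop further down: one word holding the number of
         * segments, followed by a (SRAM address, length, EEPROM data
         * pointer) triple per segment; the loop treats length as a count
         * of 32-bit words. The 21-bit EEPROM pointers are converted to
         * word offsets with ((ptr & 0x1fffff) >> 3).
         */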
        io = readl(&regs->ExtIo);
        writel(0, &regs->ExtIo);
        sram_size = rr_read_eeprom_word(rrpriv, 8);

        for (i = 200; i < sram_size / 4; i++){
                writel(i * 4, &regs->WinBase);
                mb();
                writel(0, &regs->WinData);
                mb();
        }
        writel(io, &regs->ExtIo);
        mb();

        eptr = rr_read_eeprom_word(rrpriv,
                       offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
        eptr = ((eptr & 0x1fffff) >> 3);

        p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
        p2len = (p2len << 2);
        p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
        p2size = ((p2size & 0x1fffff) >> 3);

        if ((eptr < p2size) || (eptr > (p2size + p2len))){
                printk("%s: eptr is invalid\n", dev->name);
                goto out;
        }

        revision = rr_read_eeprom_word(rrpriv,
                        offsetof(struct eeprom, manf.HeaderFmt));

        if (revision != 1){
                printk("%s: invalid firmware format (%i)\n",
                       dev->name, revision);
                goto out;
        }

        nr_seg = rr_read_eeprom_word(rrpriv, eptr);
        eptr += 4;
#if (DEBUG > 1)
        printk("%s: nr_seg %i\n", dev->name, nr_seg);
#endif

        for (i = 0; i < nr_seg; i++){
                sptr = rr_read_eeprom_word(rrpriv, eptr);
                eptr += 4;
                len = rr_read_eeprom_word(rrpriv, eptr);
                eptr += 4;
                segptr = rr_read_eeprom_word(rrpriv, eptr);
                segptr = ((segptr & 0x1fffff) >> 3);
                eptr += 4;
#if (DEBUG > 1)
                printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
                       dev->name, i, sptr, len, segptr);
#endif
                for (j = 0; j < len; j++){
                        tmp = rr_read_eeprom_word(rrpriv, segptr);
                        writel(sptr, &regs->WinBase);
                        mb();
                        writel(tmp, &regs->WinData);
                        mb();
                        segptr += 4;
                        sptr += 4;
                }
        }

 out:
        writel(localctrl, &regs->LocalCtrl);
        mb();
        return 0;
}


static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct rr_private *rrpriv;
        unsigned char *image, *oldimage;
        unsigned long flags;
        unsigned int i;
        int error = -EOPNOTSUPP;

        rrpriv = netdev_priv(dev);

        switch(cmd){
        case SIOCRRGFW:
                if (!capable(CAP_SYS_RAWIO)){
                        return -EPERM;
                }

                image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
                if (!image)
                        return -ENOMEM;

                if (rrpriv->fw_running){
                        printk("%s: Firmware already running\n", dev->name);
                        error = -EPERM;
                        goto gf_out;
                }

                spin_lock_irqsave(&rrpriv->lock, flags);
                i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
                spin_unlock_irqrestore(&rrpriv->lock, flags);
                if (i != EEPROM_BYTES){
                        printk(KERN_ERR "%s: Error reading EEPROM\n",
                               dev->name);
                        error = -EFAULT;
                        goto gf_out;
                }
                error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
                if (error)
                        error = -EFAULT;
        gf_out:
                kfree(image);
                return error;

        case SIOCRRPFW:
                if (!capable(CAP_SYS_RAWIO)){
                        return -EPERM;
                }

                image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
                oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
                if (!image || !oldimage) {
                        error = -ENOMEM;
                        goto wf_out;
                }

                error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
                if (error) {
                        error = -EFAULT;
                        goto wf_out;
                }

                if (rrpriv->fw_running){
                        printk("%s: Firmware already running\n", dev->name);
                        error = -EPERM;
                        goto wf_out;
                }

                printk("%s: Updating EEPROM firmware\n", dev->name);

                spin_lock_irqsave(&rrpriv->lock, flags);
                error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
                if (error)
                        printk(KERN_ERR "%s: Error writing EEPROM\n",
                               dev->name);

                i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
                spin_unlock_irqrestore(&rrpriv->lock, flags);

                if (i != EEPROM_BYTES)
                        printk(KERN_ERR "%s: Error reading back EEPROM "
                               "image\n", dev->name);

                error = memcmp(image, oldimage, EEPROM_BYTES);
                if (error){
                        printk(KERN_ERR "%s: Error verifying EEPROM image\n",
                               dev->name);
                        error = -EFAULT;
                }
        wf_out:
                kfree(oldimage);
                kfree(image);
                return error;

        case SIOCRRID:
                return put_user(0x52523032, (int __user *)rq->ifr_data);
        default:
                return error;
        }
}

static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
        { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
          PCI_ANY_ID, PCI_ANY_ID, },
        { 0,}
};
MODULE_DEVICE_TABLE(pci, rr_pci_tbl);

static struct pci_driver rr_driver = {
        .name           = "rrunner",
        .id_table       = rr_pci_tbl,
        .probe          = rr_init_one,
        .remove         = rr_remove_one,
};

module_pci_driver(rr_driver);